Diffstat (limited to 'drivers/net/cxgbe')
-rw-r--r--  drivers/net/cxgbe/Makefile                    47
-rw-r--r--  drivers/net/cxgbe/base/adapter.h             169
-rw-r--r--  drivers/net/cxgbe/base/common.h              205
-rw-r--r--  drivers/net/cxgbe/base/t4_chip_type.h         34
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c              1159
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.h                38
-rw-r--r--  drivers/net/cxgbe/base/t4_msg.h              244
-rw-r--r--  drivers/net/cxgbe/base/t4_pci_id_tbl.h        34
-rw-r--r--  drivers/net/cxgbe/base/t4_regs.h             172
-rw-r--r--  drivers/net/cxgbe/base/t4_regs_values.h       34
-rw-r--r--  drivers/net/cxgbe/base/t4_tcb.h               26
-rw-r--r--  drivers/net/cxgbe/base/t4fw_interface.h      667
-rw-r--r--  drivers/net/cxgbe/base/t4vf_hw.c             880
-rw-r--r--  drivers/net/cxgbe/base/t4vf_hw.h              15
-rw-r--r--  drivers/net/cxgbe/clip_tbl.c                 193
-rw-r--r--  drivers/net/cxgbe/clip_tbl.h                  31
-rw-r--r--  drivers/net/cxgbe/cxgbe.h                     65
-rw-r--r--  drivers/net/cxgbe/cxgbe_compat.h              55
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c             436
-rw-r--r--  drivers/net/cxgbe/cxgbe_filter.c            1252
-rw-r--r--  drivers/net/cxgbe/cxgbe_filter.h             235
-rw-r--r--  drivers/net/cxgbe/cxgbe_flow.c               845
-rw-r--r--  drivers/net/cxgbe/cxgbe_flow.h                42
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c               768
-rw-r--r--  drivers/net/cxgbe/cxgbe_ofld.h                89
-rw-r--r--  drivers/net/cxgbe/cxgbe_pfvf.h                45
-rw-r--r--  drivers/net/cxgbe/cxgbevf_ethdev.c           201
-rw-r--r--  drivers/net/cxgbe/cxgbevf_main.c             295
-rw-r--r--  drivers/net/cxgbe/meson.build                 14
-rw-r--r--  drivers/net/cxgbe/sge.c                      627
30 files changed, 7982 insertions, 935 deletions
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index 65df1425..5d66c4b3 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -1,33 +1,6 @@
-# BSD LICENSE
-#
-# Copyright(c) 2014-2015 Chelsio Communications.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Chelsio Communications nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014-2018 Chelsio Communications.
+# All rights reserved.
include $(RTE_SDK)/mk/rte.vars.mk
@@ -45,12 +18,6 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map
LIBABIVER := 1
-ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
-#
-# CFLAGS for icc
-#
-CFLAGS_BASE_DRIVER = -wd188
-else
#
# CFLAGS for gcc/clang
#
@@ -59,9 +26,7 @@ ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS += -Wno-deprecated
endif
endif
-CFLAGS_BASE_DRIVER =
-endif
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
@@ -80,8 +45,14 @@ VPATH += $(SRCDIR)/base
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index f2057af1..e98dd218 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
/* This file should not be included directly. Include common.h instead. */
@@ -39,12 +11,16 @@
#include <rte_bus_pci.h>
#include <rte_mbuf.h>
#include <rte_io.h>
+#include <rte_rwlock.h>
+#include <rte_ethdev.h>
#include "cxgbe_compat.h"
#include "t4_regs_values.h"
+#include "cxgbe_ofld.h"
enum {
MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
};
struct adapter;
@@ -68,6 +44,7 @@ struct port_info {
u8 port_type; /* firmware port type */
u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
+ u8 pidx; /* port index for this PF */
u8 tx_chan; /* associated channel */
u8 n_rx_qsets; /* # of rx qsets */
@@ -77,6 +54,7 @@ struct port_info {
u16 *rss; /* rss table */
u8 rss_mode; /* rss mode */
u16 rss_size; /* size of VI's RSS table slice */
+ u64 rss_hf; /* RSS Hash Function */
};
/* Enable or disable autonegotiation. If this is set to enable,
@@ -196,6 +174,7 @@ struct sge_eth_rxq { /* a SW Ethernet Rx queue */
* scenario where a packet needs 32 bytes.
*/
#define ETH_COALESCE_PKT_NUM 15
+#define ETH_COALESCE_VF_PKT_NUM 7
#define ETH_COALESCE_PKT_PER_DESC 2
struct tx_eth_coal_desc {
@@ -225,6 +204,10 @@ struct eth_coalesce {
unsigned int len;
unsigned int flits;
unsigned int max;
+ __u8 ethmacdst[ETHER_ADDR_LEN];
+ __u8 ethmacsrc[ETHER_ADDR_LEN];
+ __be16 ethtype;
+ __be16 vlantci;
};
struct sge_txq {
@@ -247,6 +230,7 @@ struct sge_txq {
unsigned int equeidx; /* last sent credit request */
unsigned int last_pidx; /* last pidx recorded by tx monitor */
unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+ unsigned int abs_id;
int db_disabled; /* doorbell state */
unsigned short db_pidx; /* doorbell producer index */
@@ -267,16 +251,27 @@ struct sge_eth_tx_stats { /* Ethernet tx queue statistics */
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
struct sge_txq q;
struct rte_eth_dev *eth_dev; /* port that this queue belongs to */
+ struct rte_eth_dev_data *data;
struct sge_eth_tx_stats stats; /* queue statistics */
rte_spinlock_t txq_lock;
unsigned int flags; /* flags for state of the queue */
} __rte_cache_aligned;
+struct sge_ctrl_txq { /* State for an SGE control Tx queue */
+ struct sge_txq q; /* txq */
+ struct adapter *adapter; /* adapter associated with this queue */
+ rte_spinlock_t ctrlq_lock; /* control queue lock */
+ u8 full; /* the Tx ring is full */
+ u64 txp; /* number of transmits */
+ struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
struct sge {
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_rspq fw_evtq __rte_cache_aligned;
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
u16 max_ethqsets; /* # of available Ethernet queue sets */
u32 stat_len; /* length of status page at ring end */
@@ -308,7 +303,7 @@ struct adapter {
struct rte_pci_device *pdev; /* associated rte pci device */
struct rte_eth_dev *eth_dev; /* first port's rte eth device */
struct adapter_params params; /* adapter parameters */
- struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */
+ struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
struct sge sge; /* associated SGE */
/* support for single-threading access to adapter mailbox registers */
@@ -325,8 +320,76 @@ struct adapter {
unsigned int vpd_flag;
int use_unpacked_mode; /* unpacked rx mode state */
+ rte_spinlock_t win0_lock;
+
+ unsigned int clipt_start; /* CLIP table start */
+ unsigned int clipt_end; /* CLIP table end */
+ struct clip_tbl *clipt; /* CLIP table */
+
+ struct tid_info tids; /* Info used to access TID related tables */
};
+/**
+ * t4_os_rwlock_init - initialize rwlock
+ * @lock: the rwlock
+ */
+static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
+{
+ rte_rwlock_init(lock);
+}
+
+/**
+ * t4_os_write_lock - get a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_lock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_lock(lock);
+}
+
+/**
+ * t4_os_write_unlock - unlock a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_unlock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_unlock(lock);
+}
+
+/**
+ * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct port_info associated with a rte_eth_dev
+ */
+static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
+{
+ return (struct port_info *)dev->data->dev_private;
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
+{
+ return adap->port[idx];
+}
+
+/**
+ * ethdev2adap - return the adapter structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct adapter associated with a rte_eth_dev
+ */
+static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->adapter;
+}
+
#define CXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
@@ -602,7 +665,7 @@ static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- struct port_info *pi = &adapter->port[port_idx];
+ struct port_info *pi = adap2pinfo(adapter, port_idx);
ether_addr_copy((struct ether_addr *)hw_addr,
&pi->eth_dev->data->mac_addrs[0]);
@@ -688,15 +751,35 @@ static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
}
/**
- * adap2pinfo - return the port_info of a port
- * @adap: the adapter
- * @idx: the port index
+ * t4_init_completion - initialize completion
+ * @c: the completion context
+ */
+static inline void t4_init_completion(struct t4_completion *c)
+{
+ c->done = 0;
+ t4_os_lock_init(&c->lock);
+}
+
+/**
+ * t4_complete - set completion as done
+ * @c: the completion context
+ */
+static inline void t4_complete(struct t4_completion *c)
+{
+ t4_os_lock(&c->lock);
+ c->done = 1;
+ t4_os_unlock(&c->lock);
+}
+
+/**
+ * cxgbe_port_viid - get the VI id of a port
+ * @dev: the device for the port
*
- * Return the port_info structure for the port of the given index.
+ * Return the VI id of the given port.
*/
-static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
{
- return &adap->port[idx];
+ return ethdev2pinfo(dev)->viid;
}
void *t4_alloc_mem(size_t size);
@@ -713,12 +796,17 @@ void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
uint16_t nb_pkts);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
+int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct rte_eth_dev *eth_dev, uint16_t queue_id,
unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
struct rte_eth_dev *eth_dev, int intr_idx,
struct sge_fl *fl, rspq_handler_t handler,
@@ -735,6 +823,7 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done);
-int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);
#endif /* __T4_ADAPTER_H__ */
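
[Editor's note] The adapter.h hunks above convert adapter->port[] from an embedded array to an array of pointers and add the ethdev2pinfo()/ethdev2adap()/cxgbe_port_viid() accessors. A minimal sketch (hypothetical helper, not part of this patch) of how an rte_eth_dev callback is expected to reach per-port and per-adapter state through these accessors; is_pf4() comes from the common.h changes below:

static int example_dev_op(struct rte_eth_dev *eth_dev)
{
	struct port_info *pi = ethdev2pinfo(eth_dev);  /* per-port private data */
	struct adapter *adap = ethdev2adap(eth_dev);   /* shared adapter state */

	/* VF paths skip PF-only firmware commands; is_pf4() is the switch
	 * used throughout this series to pick the PF or VF mailbox path.
	 */
	dev_debug(adap, "port %u viid %u pf4 %d\n",
		  pi->port_id, cxgbe_port_viid(eth_dev), is_pf4(adap));
	return 0;
}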
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 1eda57d0..157201da 100644
--- a/drivers/net/cxgbe/base/common.h
+++ b/drivers/net/cxgbe/base/common.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __CHELSIO_COMMON_H
@@ -36,6 +8,7 @@
#include "cxgbe_compat.h"
#include "t4_hw.h"
+#include "t4vf_hw.h"
#include "t4_chip_type.h"
#include "t4fw_interface.h"
@@ -45,6 +18,9 @@ extern "C" {
#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K
+#define T4_MEMORY_WRITE 0
+#define T4_MEMORY_READ 1
+
enum {
MAX_NPORTS = 4, /* max # of ports */
};
@@ -62,18 +38,20 @@ enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
-enum {
+enum cc_pause {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
-enum {
- FEC_RS = 1 << 0,
- FEC_BASER_RS = 1 << 1,
- FEC_RESERVED = 1 << 2,
+enum cc_fec {
+ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
+ FEC_RS = 1 << 1, /* Reed-Solomon */
+ FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */
};
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
struct port_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_frames; /* all good frames */
@@ -178,6 +156,9 @@ struct tp_params {
int vnic_shift;
int port_shift;
int protocol_shift;
+ int ethertype_shift;
+
+ u64 hash_filter_mask;
};
struct vpd_params {
@@ -209,12 +190,59 @@ struct arch_specific_params {
u16 mps_tcam_size;
};
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ uint synmapen:1; /* SYN Map Enable */
+ uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */
+ uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */
+ uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */
+ uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */
+ uint ofdmapen:1; /* Offload Map Enable */
+ uint tnlmapen:1; /* Tunnel Map Enable */
+ uint tnlalllookup:1; /* Tunnel All Lookup */
+ uint hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int neq; /* N egress Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
struct devlog_params devlog;
+ struct rss_params rss;
+ struct pf_resources pfres;
+ struct vf_resources vfres;
enum pcie_memwin drv_memwin;
unsigned int sf_size; /* serial flash size in bytes */
@@ -235,23 +263,46 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
+ unsigned char hash_filter;
+
enum chip_type chip; /* chip code */
struct arch_specific_params arch; /* chip specific params */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ u8 fw_caps_support; /* 32-bit Port Capabilities */
+};
+
+/* Firmware Port Capabilities types.
+ */
+typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */
+typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */
+
+enum fw_caps {
+ FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */
+ FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */
+ FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */
};
struct link_config {
- unsigned short supported; /* link capabilities */
- unsigned short advertising; /* advertised capabilities */
- unsigned int requested_speed; /* speed user has requested */
- unsigned int speed; /* actual link speed */
- unsigned char requested_fc; /* flow control user has requested */
- unsigned char fc; /* actual link flow control */
- unsigned char requested_fec; /* Forward Error Correction user */
- unsigned char fec; /* has requested and actual FEC */
- unsigned char autoneg; /* autonegotiating? */
- unsigned char link_ok; /* link up? */
+ fw_port_cap32_t pcaps; /* link capabilities */
+ fw_port_cap32_t acaps; /* advertised capabilities */
+
+ u32 requested_speed; /* speed (Mb/s) user has requested */
+ u32 speed; /* actual link speed (Mb/s) */
+
+ enum cc_pause requested_fc; /* flow control user has requested */
+ enum cc_pause fc; /* actual link flow control */
+
+ enum cc_fec auto_fec; /* Forward Error Correction
+ * "automatic" (IEEE 802.3)
+ */
+ enum cc_fec requested_fec; /* Forward Error Correction requested */
+ enum cc_fec fec; /* Forward Error Correction actual */
+
+ unsigned char autoneg; /* autonegotiating? */
+
+ unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
};
#include "adapter.h"
@@ -269,9 +320,19 @@ static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
delay, NULL);
}
+static inline int is_pf4(struct adapter *adap)
+{
+ return adap->pf == 4;
+}
+
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
+static inline int is_hashfilter(const struct adapter *adap)
+{
+ return adap->params.hash_filter;
+}
+
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
@@ -285,9 +346,12 @@ int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
enum dev_master master, enum dev_state *state);
int t4_fw_bye(struct adapter *adap, unsigned int mbox);
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4vf_fw_reset(struct adapter *adap);
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
int t4_fl_pkt_align(struct adapter *adap);
+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);
+int t4vf_get_vfres(struct adapter *adap);
int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size,
enum chip_type chip_compat);
@@ -297,6 +361,13 @@ int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int nparams, const u32 *params,
u32 *val);
+int t4vf_query_params(struct adapter *adap, unsigned int nparams,
+ const u32 *params, u32 *vals);
+int t4vf_get_dev_params(struct adapter *adap);
+int t4vf_get_vpd_params(struct adapter *adap);
+int t4vf_get_rss_glb_config(struct adapter *adap);
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals);
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
unsigned int pf, unsigned int vf,
unsigned int nparams, const u32 *params,
@@ -331,6 +402,8 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int fl0id, unsigned int fl1id);
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
@@ -379,6 +452,21 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
}
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, u32 *vals, unsigned int nregs,
unsigned int start_idx);
@@ -387,6 +475,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int nregs, unsigned int start_idx);
int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
int t4_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t4_flash_cfg_addr(struct adapter *adapter);
@@ -394,22 +483,34 @@ unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p);
void t4_get_port_stats_offset(struct adapter *adap, int idx,
struct port_stats *stats,
struct port_stats *offset);
void t4_clr_port_stats(struct adapter *adap, int idx);
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps);
void t4_reset_link_config(struct adapter *adap, int idx);
int t4_get_version_info(struct adapter *adapter);
void t4_dump_version_info(struct adapter *adapter);
int t4_get_flash_params(struct adapter *adapter);
int t4_get_chip_type(struct adapter *adap, int ver);
int t4_prep_adapter(struct adapter *adapter);
+int t4vf_prep_adapter(struct adapter *adapter);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4vf_port_init(struct adapter *adap);
int t4_init_rss_mode(struct adapter *adap, int mbox);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
unsigned int flags, unsigned int defq);
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq);
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw);
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx);
+void t4_read_rss_key(struct adapter *adap, u32 *key);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
@@ -421,8 +522,20 @@ int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
unsigned int t4_get_regs_len(struct adapter *adap);
+unsigned int t4vf_get_pf_from_vf(struct adapter *adap);
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_memory_rw_addr(struct adapter *adap, int win,
+ u32 addr, u32 len, void *hbuf, int dir);
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir);
+static inline int t4_memory_rw(struct adapter *adap, int win,
+ int mtype, u32 maddr, u32 len,
+ void *hbuf, int dir)
+{
+ return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir);
+}
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
#endif /* __CHELSIO_COMMON_H */
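
[Editor's note] common.h also exports a windowed adapter-memory access API: t4_memory_rw_addr()/t4_memory_rw_mtype(), the t4_memory_rw() wrapper, the T4_MEMORY_READ/T4_MEMORY_WRITE direction flags, and the MEM_EDC0/MEM_EDC1/MEM_MC memory types. A hedged sketch of a caller reading a buffer out of EDC0; the helper name, the MEMWIN_NIC window index, and the use of the new win0_lock to serialize window accesses are assumptions for illustration, not code from this patch:

static int example_read_edc0(struct adapter *adap, u32 addr, void *buf, u32 len)
{
	int ret;

	t4_os_lock(&adap->win0_lock);   /* assumed: serialize memory-window use */
	ret = t4_memory_rw(adap, MEMWIN_NIC, MEM_EDC0, addr, len, buf,
			   T4_MEMORY_READ);
	t4_os_unlock(&adap->win0_lock);
	return ret;
}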
diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h
index cd7a9282..c0c5d0b2 100644
--- a/drivers/net/cxgbe/base/t4_chip_type.h
+++ b/drivers/net/cxgbe/base/t4_chip_type.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_CHIP_TYPE_H__
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 56f38c83..31762c9c 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <netinet/in.h>
@@ -55,9 +27,6 @@
#include "t4_regs_values.h"
#include "t4fw_interface.h"
-static void init_link_config(struct link_config *lc, unsigned int pcaps,
- unsigned int acaps);
-
/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
@@ -2167,6 +2136,91 @@ int t4_seeprom_wp(struct adapter *adapter, int enable)
}
/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of the first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+ unsigned int i;
+ int ret;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST |
+ (rw ? F_FW_CMD_READ :
+ F_FW_CMD_WRITE) |
+ V_FW_LDST_CMD_ADDRSPACE(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ if (rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+ }
+}
+
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
+{
+ u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
+ u8 rss_key_addr_cnt = 16;
+
+ /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
+ * allows access to key addresses 16-63 by using KeyWrAddrX
+ * as index[5:4](upper 2) into key table
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDRX(idx >> 4) |
+ V_T6_VFWRADDR(idx) | F_KEYWREN);
+ else
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDR(idx) | F_KEYWREN);
+ }
+}
+
+/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
* @mbox: mbox to use for the FW command
@@ -2257,7 +2311,11 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
* Send this portion of the RSS table update to the firmware;
* bail out on any errors.
*/
- ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (is_pf4(adapter))
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
+ NULL);
+ else
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
if (ret)
return ret;
}
@@ -2287,7 +2345,44 @@ int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
- return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adapter))
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_read_config_vi_rss - read the configured per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: where to place the configured flags
+ * @defq: where to place the id of the default RSS queue for the VI.
+ *
+ * Read configured VI-specific RSS properties.
+ */
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq)
+{
+ struct fw_rss_vi_config_cmd c;
+ unsigned int result;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
+ if (!ret) {
+ result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
+ if (defq)
+ *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
+ if (flags)
+ *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
+ }
+
+ return ret;
}
/**
@@ -2385,6 +2480,46 @@ int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
return 0;
}
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a physical
+ * function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PFVF_CMD_PFN(adapter->pf) |
+ V_FW_PFVF_CMD_VFN(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ return 0;
+}
+
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -2670,14 +2805,142 @@ void t4_dump_version_info(struct adapter *adapter)
G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
}
-#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
- FW_PORT_CAP_ANEG)
+#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
+ FW_PORT_CAP32_ANEG)
+/**
+ * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
+ * @caps16: a 16-bit Port Capabilities value
+ *
+ * Returns the equivalent 32-bit Port Capabilities value.
+ */
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
+{
+ fw_port_cap32_t caps32 = 0;
+
+#define CAP16_TO_CAP32(__cap) \
+ do { \
+ if (caps16 & FW_PORT_CAP_##__cap) \
+ caps32 |= FW_PORT_CAP32_##__cap; \
+ } while (0)
+
+ CAP16_TO_CAP32(SPEED_100M);
+ CAP16_TO_CAP32(SPEED_1G);
+ CAP16_TO_CAP32(SPEED_25G);
+ CAP16_TO_CAP32(SPEED_10G);
+ CAP16_TO_CAP32(SPEED_40G);
+ CAP16_TO_CAP32(SPEED_100G);
+ CAP16_TO_CAP32(FC_RX);
+ CAP16_TO_CAP32(FC_TX);
+ CAP16_TO_CAP32(ANEG);
+ CAP16_TO_CAP32(MDIX);
+ CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(FEC_RS);
+ CAP16_TO_CAP32(FEC_BASER_RS);
+ CAP16_TO_CAP32(802_3_PAUSE);
+ CAP16_TO_CAP32(802_3_ASM_DIR);
+
+#undef CAP16_TO_CAP32
+
+ return caps32;
+}
+
+/**
+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ * @caps32: a 32-bit Port Capabilities value
+ *
+ * Returns the equivalent 16-bit Port Capabilities value. Note that
+ * not all 32-bit Port Capabilities can be represented in the 16-bit
+ * Port Capabilities and some fields/values may not make it.
+ */
+static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+ fw_port_cap16_t caps16 = 0;
+
+#define CAP32_TO_CAP16(__cap) \
+ do { \
+ if (caps32 & FW_PORT_CAP32_##__cap) \
+ caps16 |= FW_PORT_CAP_##__cap; \
+ } while (0)
+
+ CAP32_TO_CAP16(SPEED_100M);
+ CAP32_TO_CAP16(SPEED_1G);
+ CAP32_TO_CAP16(SPEED_10G);
+ CAP32_TO_CAP16(SPEED_25G);
+ CAP32_TO_CAP16(SPEED_40G);
+ CAP32_TO_CAP16(SPEED_100G);
+ CAP32_TO_CAP16(FC_RX);
+ CAP32_TO_CAP16(FC_TX);
+ CAP32_TO_CAP16(802_3_PAUSE);
+ CAP32_TO_CAP16(802_3_ASM_DIR);
+ CAP32_TO_CAP16(ANEG);
+ CAP32_TO_CAP16(MDIX);
+ CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(FEC_RS);
+ CAP32_TO_CAP16(FEC_BASER_RS);
+
+#undef CAP32_TO_CAP16
+
+ return caps16;
+}
+
+/* Translate Firmware Pause specification to Common Code */
+static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
+{
+ enum cc_pause cc_pause = 0;
+
+ if (fw_pause & FW_PORT_CAP32_FC_RX)
+ cc_pause |= PAUSE_RX;
+ if (fw_pause & FW_PORT_CAP32_FC_TX)
+ cc_pause |= PAUSE_TX;
+
+ return cc_pause;
+}
+
+/* Translate Common Code Pause Frame specification into Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
+{
+ fw_port_cap32_t fw_pause = 0;
+
+ if (cc_pause & PAUSE_RX)
+ fw_pause |= FW_PORT_CAP32_FC_RX;
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_FC_TX;
+
+ return fw_pause;
+}
+
+/* Translate Firmware Forward Error Correction specification to Common Code */
+static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
+{
+ enum cc_fec cc_fec = 0;
+
+ if (fw_fec & FW_PORT_CAP32_FEC_RS)
+ cc_fec |= FEC_RS;
+ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
+ cc_fec |= FEC_BASER_RS;
+
+ return cc_fec;
+}
+
+/* Translate Common Code Forward Error Correction specification to Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
+{
+ fw_port_cap32_t fw_fec = 0;
+
+ if (cc_fec & FEC_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_RS;
+ if (cc_fec & FEC_BASER_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
+
+ return fw_fec;
+}
/**
* t4_link_l1cfg - apply link configuration to MAC/PHY
- * @phy: the PHY to setup
- * @mac: the MAC to setup
- * @lc: the requested link configuration
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
*
* Set up a port's MAC and PHY according to a desired link configuration.
* - If the PHY can auto-negotiate first decide what to advertise, then
@@ -2689,48 +2952,60 @@ void t4_dump_version_info(struct adapter *adapter)
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
struct link_config *lc)
{
- struct fw_port_cmd c;
- unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
- unsigned int fc, fec;
+ unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+ struct fw_port_cmd cmd;
lc->link_ok = 0;
- fc = 0;
- if (lc->requested_fc & PAUSE_RX)
- fc |= FW_PORT_CAP_FC_RX;
- if (lc->requested_fc & PAUSE_TX)
- fc |= FW_PORT_CAP_FC_TX;
-
- fec = 0;
- if (lc->requested_fec & FEC_RS)
- fec |= FW_PORT_CAP_FEC_RS;
- if (lc->requested_fec & FEC_BASER_RS)
- fec |= FW_PORT_CAP_FEC_BASER_RS;
- if (lc->requested_fec & FEC_RESERVED)
- fec |= FW_PORT_CAP_FEC_RESERVED;
- memset(&c, 0, sizeof(c));
- c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
- V_FW_PORT_CMD_PORTID(port));
- c.action_to_len16 =
- cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
- FW_LEN16(c));
-
- if (!(lc->supported & FW_PORT_CAP_ANEG)) {
- c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
- fc | fec);
+ fw_fc = cc_to_fwcap_pause(lc->requested_fc);
+
+ /* Convert Common Code Forward Error Control settings into the
+ * Firmware's API. If the current Requested FEC has "Automatic"
+ * (IEEE 802.3) specified, then we use whatever the Firmware
+ * sent us as part of its IEEE 802.3-based interpretation of
+ * the Transceiver Module EPROM FEC parameters. Otherwise we
+ * use whatever is in the current Requested FEC settings.
+ */
+ if (lc->requested_fec & FEC_AUTO)
+ cc_fec = lc->auto_fec;
+ else
+ cc_fec = lc->requested_fec;
+ fw_fec = cc_to_fwcap_fec(cc_fec);
+
+ /* Figure out what our Requested Port Capabilities are going to be.
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
- lc->fec = lc->requested_fec;
+ lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
- fec | mdi);
+ rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
- lc->fec = lc->requested_fec;
+ lc->fec = cc_fec;
} else {
- c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
+ rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ /* And send that on to the Firmware ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ cmd.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_L1_CFG :
+ FW_PORT_ACTION_L1_CFG32) |
+ FW_LEN16(cmd));
+
+ if (fw_caps == FW_CAPS16)
+ cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
+ else
+ cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
+
+ return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
/**
@@ -3823,12 +4098,17 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
- V_FW_VI_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -3874,7 +4154,11 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
- return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+ if (is_pf4(adap))
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
+ sleep_ok);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -3921,7 +4205,10 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
V_FW_VI_MAC_CMD_IDX(idx));
memcpy(p->macaddr, addr, sizeof(p->macaddr));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret == 0) {
ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
if (ret >= max_mac_addr)
@@ -3955,7 +4242,10 @@ int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
V_FW_VI_ENABLE_CMD_EEN(tx_en) |
V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
FW_LEN16(c));
- return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
}
/**
@@ -3996,15 +4286,20 @@ int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
- V_FW_IQ_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
V_FW_IQ_CMD_IQSTOP(!start) |
FW_LEN16(c));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap)) {
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ } else {
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+ }
}
/**
@@ -4028,14 +4323,19 @@ int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
- V_FW_IQ_CMD_VFN(vf));
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
c.iqid = cpu_to_be16(iqid);
c.fl0id = cpu_to_be16(fl0id);
c.fl1id = cpu_to_be16(fl1id);
- return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
}
/**
@@ -4055,11 +4355,203 @@ int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
memset(&c, 0, sizeof(c));
c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(pf) |
- V_FW_EQ_ETH_CMD_VFN(vf));
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/* Return the highest speed set in the port capabilities, in Mb/s. */
+static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
+{
+#define TEST_SPEED_RETURN(__caps_speed, __speed) \
+ do { \
+ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
+ return __speed; \
+ } while (0)
+
+ TEST_SPEED_RETURN(100G, 100000);
+ TEST_SPEED_RETURN(50G, 50000);
+ TEST_SPEED_RETURN(40G, 40000);
+ TEST_SPEED_RETURN(25G, 25000);
+ TEST_SPEED_RETURN(10G, 10000);
+ TEST_SPEED_RETURN(1G, 1000);
+ TEST_SPEED_RETURN(100M, 100);
+
+#undef TEST_SPEED_RETURN
+
+ return 0;
+}
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
+ * @rpl: start of the FW message
+ *
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *cmd = (const void *)rpl;
+ int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
+ fw_port_cap32_t pcaps, acaps, linkattr;
+ struct link_config *lc = &pi->link_cfg;
+ struct adapter *adapter = pi->adapter;
+ enum fw_port_module_type mod_type;
+ enum fw_port_type port_type;
+ unsigned int speed, fc, fec;
+ int link_ok, linkdnrc;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ switch (action) {
+ case FW_PORT_ACTION_GET_PORT_INFO: {
+ u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
+
+ link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
+ pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
+ acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
+
+ /* Unfortunately the format of the Link Status in the old
+ * 16-bit Port Information message isn't the same as the
+ * 16-bit Port Capabilities bitfield used everywhere else ...
+ */
+ linkattr = 0;
+ if (lstatus & F_FW_PORT_CMD_RXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_RX;
+ if (lstatus & F_FW_PORT_CMD_TXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_TX;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ linkattr |= FW_PORT_CAP32_SPEED_100M;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ linkattr |= FW_PORT_CAP32_SPEED_1G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ linkattr |= FW_PORT_CAP32_SPEED_10G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ linkattr |= FW_PORT_CAP32_SPEED_25G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ linkattr |= FW_PORT_CAP32_SPEED_40G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ linkattr |= FW_PORT_CAP32_SPEED_100G;
+
+ break;
+ }
+
+ case FW_PORT_ACTION_GET_PORT_INFO32: {
+ u32 lstatus32 =
+ be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
+
+ link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
+ pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd->u.info32.acaps32);
+ linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
+ break;
+ }
+
+ default:
+ dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
+ be32_to_cpu(cmd->action_to_len16));
+ return;
+ }
+
+ fec = fwcap_to_cc_fec(acaps);
+
+ fc = fwcap_to_cc_pause(linkattr);
+ speed = fwcap_to_speed(linkattr);
+
+ if (mod_type != pi->mod_type) {
+ lc->auto_fec = fec;
+ pi->port_type = port_type;
+ pi->mod_type = mod_type;
+ t4_os_portmod_changed(adapter, pi->pidx);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+ dev_warn(adap, "Port %d link down, reason: %s\n",
+ pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->fec = fec;
+ lc->pcaps = pcaps;
+ lc->acaps = acaps & ADVERT_MASK;
+
+ if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+ /* When Autoneg is disabled, user needs to set
+ * single speed.
+ * Similar to cxgb4_ethtool.c: set_link_ksettings
+ */
+ lc->acaps = 0;
+ lc->requested_speed = fwcap_to_speed(acaps);
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+ }
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(pf) |
+ V_FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
@@ -4084,67 +4576,21 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
unsigned int action =
G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
- if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ if (opcode == FW_PORT_CMD &&
+ (action == FW_PORT_ACTION_GET_PORT_INFO ||
+ action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- unsigned int speed = 0, fc = 0, i;
int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
struct port_info *pi = NULL;
- struct link_config *lc;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
- u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
-
- if (stat & F_FW_PORT_CMD_RXPAUSE)
- fc |= PAUSE_RX;
- if (stat & F_FW_PORT_CMD_TXPAUSE)
- fc |= PAUSE_TX;
- if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = ETH_SPEED_NUM_100M;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = ETH_SPEED_NUM_1G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = ETH_SPEED_NUM_10G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
- speed = ETH_SPEED_NUM_25G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
- speed = ETH_SPEED_NUM_40G;
- else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
- speed = ETH_SPEED_NUM_100G;
+ int i;
for_each_port(adap, i) {
pi = adap2pinfo(adap, i);
if (pi->tx_chan == chan)
break;
}
- lc = &pi->link_cfg;
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, i);
- }
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- if (!link_ok && lc->link_ok) {
- static const char * const reason[] = {
- "Link Down",
- "Remote Fault",
- "Auto-negotiation Failure",
- "Reserved",
- "Insufficient Airflow",
- "Unable To Determine Reason",
- "No RX Signal Detected",
- "Reserved",
- };
- unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
-
- dev_warn(adap, "Port %d link down, reason: %s\n",
- chan, reason[rc]);
- }
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- }
+ t4_handle_get_port_info(pi, rpl);
} else {
dev_warn(adap, "Unknown firmware reply %d\n", opcode);
return -EINVAL;
@@ -4173,12 +4619,10 @@ void t4_reset_link_config(struct adapter *adap, int idx)
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/flow-control/autonegotiation settings.
*/
-static void init_link_config(struct link_config *lc, unsigned int pcaps,
- unsigned int acaps)
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps)
{
- unsigned int fec;
-
- lc->supported = pcaps;
+ lc->pcaps = pcaps;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = 0;
@@ -4188,21 +4632,16 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps,
* For Forward Error Control, we default to whatever the Firmware
* tells us the Link is currently advertising.
*/
- fec = 0;
- if (acaps & FW_PORT_CAP_FEC_RS)
- fec |= FEC_RS;
- if (acaps & FW_PORT_CAP_FEC_BASER_RS)
- fec |= FEC_BASER_RS;
- if (acaps & FW_PORT_CAP_FEC_RESERVED)
- fec |= FEC_RESERVED;
- lc->requested_fec = fec;
- lc->fec = fec;
-
- if (lc->supported & FW_PORT_CAP_ANEG) {
- lc->advertising = lc->supported & ADVERT_MASK;
+ lc->auto_fec = fwcap_to_cc_fec(acaps);
+ lc->requested_fec = FEC_AUTO;
+ lc->fec = lc->auto_fec;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ lc->acaps = lc->pcaps & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
} else {
- lc->advertising = 0;
+ lc->acaps = 0;
lc->autoneg = AUTONEG_DISABLE;
}
}
@@ -4242,9 +4681,8 @@ struct flash_desc {
int t4_get_flash_params(struct adapter *adapter)
{
/*
- * Table for non-Numonix supported flash parts. Numonix parts are left
- * to the preexisting well-tested code. All flash parts have 64KB
- * sectors.
+ * Table for non-standard supported Flash parts. Note, all Flash
+ * parts must have 64KB sectors.
*/
static struct flash_desc supported_flash[] = {
{ 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
@@ -4253,7 +4691,7 @@ int t4_get_flash_params(struct adapter *adapter)
int ret;
u32 flashid = 0;
unsigned int part, manufacturer;
- unsigned int density, size;
+ unsigned int density, size = 0;
/**
* Issue a Read ID Command to the Flash part. We decode supported
@@ -4268,6 +4706,9 @@ int t4_get_flash_params(struct adapter *adapter)
if (ret < 0)
return ret;
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
if (supported_flash[part].vendor_and_model_id == flashid) {
adapter->params.sf_size =
@@ -4278,6 +4719,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
}
+ /**
+ * Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
manufacturer = flashid & 0xff;
switch (manufacturer) {
case 0x20: { /* Micron/Numonix */
@@ -4314,21 +4764,81 @@ int t4_get_flash_params(struct adapter *adapter)
case 0x22:
size = 1 << 28; /* 256MB */
break;
- default:
- dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
+ break;
+ }
- adapter->params.sf_size = size;
- adapter->params.sf_nsec = size / SF_SEC_SIZE;
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
break;
}
- default:
- dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
- return -EINVAL;
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
}
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+
found:
/*
* We should reject adapters with FLASHes which are too small. So, emit
@@ -4633,6 +5143,8 @@ int t4_init_tp_params(struct adapter *adap)
adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
F_PROTOCOL);
+ adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+ F_ETHERTYPE);
/*
* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
@@ -4641,6 +5153,11 @@ int t4_init_tp_params(struct adapter *adap)
if ((adap->params.tp.ingress_config & F_VNIC) == 0)
adap->params.tp.vnic_shift = -1;
+ v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask = v;
+ v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask |= ((u64)v << 32);
+
return 0;
}
@@ -4723,47 +5240,305 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
- u8 addr[6];
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ struct fw_port_cmd cmd;
int ret, i, j = 0;
- struct fw_port_cmd c;
+ int mdio_addr;
+ u32 action;
+ u8 addr[6];
- memset(&c, 0, sizeof(c));
+ memset(&cmd, 0, sizeof(cmd));
for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
unsigned int rss_size = 0;
- struct port_info *p = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
- F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(j));
- c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
- FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(c));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ /* If we haven't yet determined whether we're talking to
+ * Firmware which knows the new 32-bit Port Capabilities, it's
+ * time to find out now. This will also tell new Firmware to
+ * send us Port Status Updates using the new 32-bit Port
+ * Capabilities version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val, caps;
+
+ caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(caps));
+ val = 1;
+ ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
+ &val);
+ fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
+ adap->params.fw_caps_support = fw_caps;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32;
+ cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
+ FW_LEN16(cmd));
+ ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
if (ret)
return ret;
+ /* Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus =
+ be32_to_cpu(cmd.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
+ pcaps = be16_to_cpu(cmd.u.info.pcap);
+ acaps = be16_to_cpu(cmd.u.info.acap);
+ pcaps = fwcaps16_to_caps32(pcaps);
+ acaps = fwcaps16_to_caps32(acaps);
+ } else {
+ u32 lstatus32 =
+ be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1;
+ pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd.u.info32.acaps32);
+ }
+
ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
if (ret < 0)
return ret;
- p->viid = ret;
- p->tx_chan = j;
- p->rss_size = rss_size;
+ pi->viid = ret;
+ pi->tx_chan = j;
+ pi->rss_size = rss_size;
t4_os_set_hw_addr(adap, i, addr);
- ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
- G_FW_PORT_CMD_MDIOADDR(ret) : -1;
- p->port_type = G_FW_PORT_CMD_PTYPE(ret);
- p->mod_type = FW_PORT_MOD_TYPE_NA;
+ pi->port_type = port_type;
+ pi->mdio_addr = mdio_addr;
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
- be16_to_cpu(c.u.info.acap));
+ init_link_config(&pi->link_cfg, pcaps, acaps);
j++;
}
return 0;
}
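
Older firmware reports port capabilities as 16-bit words while newer firmware uses the 32-bit layout, which is why t4_port_init() probes FW_PARAMS_PARAM_PFVF_PORT_CAPS32 and then normalizes everything to fw_port_cap32_t via fwcaps16_to_caps32(). A minimal sketch of that translation, restricted to the speed bits and using the FW_PORT_CAP_* / FW_PORT_CAP32_* constants from t4fw_interface.h (illustrative only; the real helper also converts the pause, FEC, autoneg and MDI bits):

	/* Illustrative only: translate the speed bits of a 16-bit capability
	 * word into the 32-bit layout.
	 */
	static fw_port_cap32_t speed_caps16_to_caps32(unsigned int caps16)
	{
		fw_port_cap32_t caps32 = 0;

		if (caps16 & FW_PORT_CAP_SPEED_100M)
			caps32 |= FW_PORT_CAP32_SPEED_100M;
		if (caps16 & FW_PORT_CAP_SPEED_1G)
			caps32 |= FW_PORT_CAP32_SPEED_1G;
		if (caps16 & FW_PORT_CAP_SPEED_10G)
			caps32 |= FW_PORT_CAP32_SPEED_10G;
		if (caps16 & FW_PORT_CAP_SPEED_25G)
			caps32 |= FW_PORT_CAP32_SPEED_25G;
		if (caps16 & FW_PORT_CAP_SPEED_40G)
			caps32 |= FW_PORT_CAP32_SPEED_40G;
		if (caps16 & FW_PORT_CAP_SPEED_100G)
			caps32 |= FW_PORT_CAP32_SPEED_100G;

		return caps32;
	}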
+
+/**
+ * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @addr: address within adapter memory
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address and host buffer must be aligned on 32-bit
+ * boundaries; the length may be arbitrary.
+ *
+ * NOTES:
+ * 1. The memory is transferred as a raw byte sequence from/to the
+ * firmware's memory. If this memory contains data structures which
+ * contain multi-byte integers, it's the caller's responsibility to
+ * perform appropriate byte order conversions.
+ *
+ * 2. It is the Caller's responsibility to ensure that no other code
+ * uses the specified PCI-E Memory Window while this routine is
+ * using it. This is typically done via the use of OS-specific
+ * locks, etc.
+ */
+int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 pos, offset, resid;
+ u32 win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
+
+ /* Argument sanity checks ...*/
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+ return -EINVAL;
+ buf = (u32 *)hbuf;
+
+ /* It's convenient to be able to handle lengths which aren't a
+ * multiple of 32-bits because we often end up transferring files to
+ * the firmware. So we'll handle that by normalizing the length here
+ * and then handling any residual transfer at the end.
+ */
+ resid = len & 0x3;
+ len -= resid;
+
+ /* Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ win));
+ mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
+ mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
+
+ win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
+
+ /* Calculate our initial PCI-E Memory Window Position and Offset into
+ * that Window.
+ */
+ pos = addr & ~(mem_aperture - 1);
+ offset = addr - pos;
+
+ /* Set up initial PCI-E Memory Window to cover the start of our
+ * transfer. (Read it back to ensure that changes propagate before we
+ * attempt to use the new value.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
+ pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
+
+ /* Transfer data to/from the adapter as long as there's an integral
+ * number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+ * Little-Endian "swizzle." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+ * swizzles.
+ */
+ while (len > 0) {
+ if (dir == T4_MEMORY_READ)
+ *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ else
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(*buf++));
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+
+ /* If we've reached the end of our current window aperture,
+ * move the PCI-E Memory Window on to the next. Note that
+ * doing this here even after "len" has reached 0 allows us
+ * to set up the PCI-E Memory Window for a possible final
+ * residual transfer below ...
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win), pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win));
+ }
+ }
+
+ /* If the original transfer had a length which wasn't a multiple of
+ * 32-bits, now's where we need to finish off the transfer of the
+ * residual amount. The PCI-E Memory Window has already been moved
+ * above (if necessary) to cover this final transfer.
+ */
+ if (resid) {
+ union {
+ u32 word;
+ char byte[4];
+ } last;
+ unsigned char *bp;
+ int i;
+
+ if (dir == T4_MEMORY_READ) {
+ last.word = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
+ bp[i] = last.byte[i];
+ } else {
+ last.word = *buf;
+ for (i = resid; i < 4; i++)
+ last.byte[i] = 0;
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(last.word));
+ }
+ }
+
+ return 0;
+}
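
The byte-order note above is easier to see with a stand-alone sketch (not driver code): each window word is a little-endian encoding of four adapter bytes, so unpacking it explicitly, or equivalently applying le32_to_cpu() before a native store, preserves the byte stream on both little- and big-endian hosts:

	#include <stdint.h>

	/* The window presents adapter bytes b0..b3 packed as
	 * (b3 << 24) | (b2 << 16) | (b1 << 8) | b0. Unpacking byte by byte
	 * is endian-neutral and matches what le32_to_cpu() plus a native
	 * store achieves in t4_memory_rw_addr() above.
	 */
	static void unpack_window_word(uint32_t window_word, uint8_t out[4])
	{
		out[0] = window_word & 0xff;
		out[1] = (window_word >> 8) & 0xff;
		out[2] = (window_word >> 16) & 0xff;
		out[3] = (window_word >> 24) & 0xff;
	}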
+
+/**
+ * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @maddr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
+ * provides a (memory type, address within memory type) interface.
+ */
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 mtype_offset;
+ u32 edc_size, mc_size;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ */
+ edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1) {
+ mtype_offset = (mtype * (edc_size * 1024 * 1024));
+ } else {
+ mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
+ A_MA_EXT_MEMORY0_BAR));
+ mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ return t4_memory_rw_addr(adap, win,
+ mtype_offset + maddr, len,
+ hbuf, dir);
+}
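
A worked example of the offset arithmetic, with hypothetical sizes (edc_size = 256MB per EDC controller, mc_size = 1024MB; the real values come from A_MA_EDRAM0_BAR and A_MA_EXT_MEMORY0_BAR):

	MEM_EDC0 -> 0 * 256MB          = 0
	MEM_EDC1 -> 1 * 256MB          = 256MB
	MEM_MC0  -> 2 * 256MB          = 512MB
	MEM_MC1  -> 2 * 256MB + 1024MB = 1536MB

which is exactly what the (mtype * edc_size) and (MEM_MC0 * edc_size + mc_size) expressions compute before t4_memory_rw_addr() adds the caller's maddr.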
diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h
index 07498841..e77563df 100644
--- a/drivers/net/cxgbe/base/t4_hw.h
+++ b/drivers/net/cxgbe/base/t4_hw.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_HW_H
@@ -70,6 +42,10 @@ enum {
SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE,
};
+enum {
+ TCB_SIZE = 128, /* TCB size */
+};
+
struct sge_qstat { /* data written to SGE queue status entries */
__be32 qid;
__be16 cidx;
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 6acd749a..5d433c91 100644
--- a/drivers/net/cxgbe/base/t4_msg.h
+++ b/drivers/net/cxgbe/base/t4_msg.h
@@ -1,40 +1,21 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef T4_MSG_H
#define T4_MSG_H
enum {
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_TID_RELEASE = 0x1A,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_ACT_OPEN_REQ6 = 0x83,
CPL_SGE_EGR_UPDATE = 0xA5,
CPL_FW4_MSG = 0xC0,
CPL_FW6_MSG = 0xE0,
@@ -42,6 +23,20 @@ enum {
CPL_TX_PKT_XT = 0xEE,
};
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
enum { /* TX_PKT_XT checksum types */
TX_CSUM_TCPIP = 8,
TX_CSUM_UDPIP = 9,
@@ -53,6 +48,24 @@ union opcode_tid {
__u8 opcode;
};
+#define S_CPL_OPCODE 24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID 0
+#define M_TID_TID 0x3fff
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
struct rss_header {
__u8 opcode;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -94,6 +107,169 @@ struct work_request_hdr {
#define WR_HDR_SIZE 0
#endif
+#define S_COOKIE 5
+#define M_COOKIE 0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+/* option 0 fields */
+#define S_TX_CHAN 2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
+#define S_DELACK 5
+#define V_DELACK(x) ((x) << S_DELACK)
+
+#define S_NON_OFFLOAD 7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+
+#define S_SMAC_SEL 28
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+
+#define S_TCAM_BYPASS 48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID 10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U)
+
+#define S_CONG_CNTRL 14
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+
+#define S_RX_CHANNEL 26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
+#define S_T5_OPT_2_VALID 31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
+
+struct cpl_t6_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+struct cpl_t6_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_FILTER_TUPLE 24
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS 0
+#define M_AOPEN_STATUS 0xFF
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID 8
+#define M_AOPEN_ATID 0xFFFFFF
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD 0
+#define V_WORD(x) ((x) << S_WORD)
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO 0
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+
+#define S_REPLY_CHAN 14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __u8 cookie;
+ __u8 status;
+ __be64 oldval;
+};
+
+/* cpl_abort_req status command code
+ */
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
struct cpl_tx_data {
union opcode_tid ot;
__be32 len;
@@ -299,7 +475,13 @@ struct cpl_fw6_msg {
__be64 data[4];
};
+/* ULP_TX opcodes */
+enum {
+ ULP_TX_PKT = 4
+};
+
enum {
+ ULP_TX_SC_NOOP = 0x80,
ULP_TX_SC_IMM = 0x81,
ULP_TX_SC_DSGL = 0x82,
ULP_TX_SC_ISGL = 0x83
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
index 1230e738..5f5cbe04 100644
--- a/drivers/net/cxgbe/base/t4_pci_id_tbl.h
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_PCI_ID_TBL_H__
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 1100e16f..6f872edc 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#define MYPF_BASE 0x1b000
@@ -77,6 +49,7 @@
#define SGE_BASE_ADDR 0x1000
#define A_SGE_PF_KDOORBELL 0x0
+#define A_SGE_VF_KDOORBELL 0x0
#define S_QID 15
#define M_QID 0x1ffffU
@@ -103,6 +76,9 @@
#define A_SGE_PF_GTS 0x4
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define A_SGE_VF_GTS 0x4
+
#define S_INGRESSQID 16
#define M_INGRESSQID 0xffffU
#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
@@ -191,6 +167,8 @@
#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
#define S_ERR_CPL_EXCEED_IQE_SIZE 22
#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
@@ -280,6 +258,11 @@
#define A_SGE_CONM_CTRL 0x1094
+#define S_T6_EGRTHRESHOLDPACKING 16
+#define M_T6_EGRTHRESHOLDPACKING 0xffU
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \
+ M_T6_EGRTHRESHOLDPACKING)
+
#define S_EGRTHRESHOLD 8
#define M_EGRTHRESHOLD 0x3fU
#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
@@ -370,6 +353,7 @@
#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
#define A_SGE_CONTROL2 0x1124
@@ -443,6 +427,8 @@
/* registers for module CIM */
#define CIM_BASE_ADDR 0x7b00
+#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0
+
#define A_CIM_PF_MAILBOX_DATA 0x240
#define A_CIM_PF_MAILBOX_CTRL 0x280
@@ -462,6 +448,8 @@
#define V_UPCRST(x) ((x) << S_UPCRST)
#define F_UPCRST V_UPCRST(1U)
+#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
/* registers for module TP */
#define A_TP_OUT_CONFIG 0x7d04
@@ -470,6 +458,7 @@
#define F_CRXPKTENC V_CRXPKTENC(1U)
#define TP_BASE_ADDR 0x7d00
+#define A_TP_CMM_TCB_BASE 0x7d10
#define A_TP_TIMER_RESOLUTION 0x7d90
@@ -503,9 +492,34 @@
#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+#define A_TP_RSS_CONFIG_VRT 0x7e00
+
+#define S_KEYMODE 6
+#define M_KEYMODE 0x3U
+#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE)
+
+#define S_KEYWRADDR 0
+#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
+
+#define S_KEYWREN 4
+#define V_KEYWREN(x) ((x) << S_KEYWREN)
+#define F_KEYWREN V_KEYWREN(1U)
+
+#define S_KEYWRADDRX 30
+#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX)
+
+#define S_KEYEXTEND 26
+#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND)
+#define F_KEYEXTEND V_KEYEXTEND(1U)
+
+#define S_T6_VFWRADDR 8
+#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR)
+
#define A_TP_PIO_ADDR 0x7e40
#define A_TP_PIO_DATA 0x7e44
+#define A_TP_RSS_SECRET_KEY0 0x40
+
#define A_TP_VLAN_PRI_MAP 0x140
#define S_FRAGMENTATION 9
@@ -558,8 +572,27 @@
#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+#define S_RM_OVLAN 9
+#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+
+/* registers for module MA */
+#define A_MA_EDRAM0_BAR 0x77c0
+
+#define S_EDRAM0_SIZE 0
+#define M_EDRAM0_SIZE 0xfffU
+#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
+#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+
+#define A_MA_EXT_MEMORY0_BAR 0x77c8
+
+#define S_EXT_MEM0_SIZE 0
+#define M_EXT_MEM0_SIZE 0xfffU
+#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
+#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+
/* registers for module MPS */
#define MPS_BASE_ADDR 0x9000
+#define T4VF_MPS_BASE_ADDR 0x0100
#define S_REPLICATE 11
#define V_REPLICATE(x) ((x) << S_REPLICATE)
@@ -766,6 +799,69 @@
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+
+#define A_MPS_PORT0_RX_IVLAN 0x3011c
+
+#define S_IVLAN_ETYPE 0
+#define M_IVLAN_ETYPE 0xffffU
+#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE)
+
+#define MPS_PORT_RX_IVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_IVLAN(idx) \
+ (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE)
+
+#define A_MPS_PORT0_RX_OVLAN0 0x30120
+
+#define S_OVLAN_MASK 16
+#define M_OVLAN_MASK 0xffffU
+#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK)
+
+#define S_OVLAN_ETYPE 0
+#define M_OVLAN_ETYPE 0xffffU
+#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE)
+
+#define MPS_PORT_RX_OVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_OVLAN_BASE(idx) \
+ (A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE)
+#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg))
+
+#define A_RX_OVLAN0 0x0
+#define A_RX_OVLAN1 0x4
+#define A_RX_OVLAN2 0x8
+
+#define A_MPS_PORT0_RX_CTL 0x30100
+
+#define S_OVLAN_EN0 0
+#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
+#define F_OVLAN_EN0 V_OVLAN_EN0(1)
+
+#define S_OVLAN_EN1 1
+#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1)
+#define F_OVLAN_EN1 V_OVLAN_EN1(1)
+
+#define S_OVLAN_EN2 2
+#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2)
+#define F_OVLAN_EN2 V_OVLAN_EN2(1)
+
+#define S_IVLAN_EN 4
+#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN)
+#define F_IVLAN_EN V_IVLAN_EN(1)
+
+#define MPS_PORT_RX_CTL_STRIDE 0x4000
+#define MPS_PORT_RX_CTL(idx) \
+ (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE)
+
/* registers for module ULP_RX */
#define ULP_RX_BASE_ADDR 0x19150
@@ -823,6 +919,7 @@
#define F_PFCIM V_PFCIM(1U)
#define A_PL_WHOAMI 0x19400
+#define A_PL_VF_WHOAMI 0x0
#define A_PL_RST 0x19428
@@ -837,8 +934,21 @@
#define F_PIORSTMODE V_PIORSTMODE(1U)
#define A_PL_REV 0x1943c
+#define A_PL_VF_REV 0x4
#define S_REV 0
#define M_REV 0xfU
#define V_REV(x) ((x) << S_REV)
#define G_REV(x) (((x) >> S_REV) & M_REV)
+
+/* registers for module LE */
+#define A_LE_DB_CONFIG 0x19c04
+
+#define S_HASHEN 20
+#define V_HASHEN(x) ((x) << S_HASHEN)
+#define F_HASHEN V_HASHEN(1U)
+
+#define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h
index 9085ff6d..a9414d20 100644
--- a/drivers/net/cxgbe/base/t4_regs_values.h
+++ b/drivers/net/cxgbe/base/t4_regs_values.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef __T4_REGS_VALUES_H__
diff --git a/drivers/net/cxgbe/base/t4_tcb.h b/drivers/net/cxgbe/base/t4_tcb.h
new file mode 100644
index 00000000..25435f9f
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_tcb.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 105:96 */
+#define W_TCB_RSS_INFO 3
+#define S_TCB_RSS_INFO 0
+#define M_TCB_RSS_INFO 0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP 5
+#define S_TCB_TIMESTAMP 0
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define S_TCB_T_RTT_TS_RECENT_AGE 0
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+#endif /* _T4_TCB_DEFS_H */
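
These W_/S_/M_/V_ groups plug directly into the word_cookie/mask/val fields of cpl_set_tcb_field from t4_msg.h. A hedged sketch of rewriting the TCB timestamp word of a connection, assuming the usual cpu_to_be16()/cpu_to_be64() helpers from cxgbe_compat.h (illustrative only, not the driver's filter code):

	/* Ask firmware to overwrite the 32-bit timestamp field (TCB word
	 * W_TCB_TIMESTAMP) of connection 'tid' with 'ts'.
	 */
	static void example_set_tcb_timestamp(struct cpl_set_tcb_field *req,
					      unsigned int tid, u32 ts)
	{
		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
		req->word_cookie = cpu_to_be16(V_WORD(W_TCB_TIMESTAMP));
		req->mask = cpu_to_be64(V_TCB_TIMESTAMP(M_TCB_TIMESTAMP));
		req->val = cpu_to_be64(V_TCB_TIMESTAMP((u64)ts));
	}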
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 6ca4f318..e80b58a3 100644
--- a/drivers/net/cxgbe/base/t4fw_interface.h
+++ b/drivers/net/cxgbe/base/t4fw_interface.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _T4FW_INTERFACE_H_
@@ -82,8 +54,13 @@ enum fw_memtype {
********************************/
enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
FW_ETH_TX_PKT_WR = 0x08,
FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_ETH_TX_PKTS2_WR = 0x78,
};
@@ -102,6 +79,11 @@ struct fw_wr_hdr {
#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
+ */
+#define S_FW_WR_ATOMIC 23
+#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
+
/* work request immediate data length (hi)
*/
#define S_FW_WR_IMMDLEN 0
@@ -118,6 +100,11 @@ struct fw_wr_hdr {
#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID 8
+#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID)
+
/* length in units of 16-bytes (lo)
*/
#define S_FW_WR_LEN16 0
@@ -146,6 +133,173 @@ struct fw_eth_tx_pkts_wr {
__u8 type;
};
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct fw_eth_tx_pkts_vm_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 r4;
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+
+#define S_FW_FILTER_WR_IQ 0
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+
+#define S_FW_FILTER_WR_DROP 24
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+
+#define S_FW_FILTER_WR_PORT 9
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -171,24 +325,34 @@ struct fw_eth_tx_pkts_wr {
#define FW_CMD_HELLO_RETRIES 3
enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
FW_RESET_CMD = 0x03,
FW_HELLO_CMD = 0x04,
FW_BYE_CMD = 0x05,
FW_INITIALIZE_CMD = 0x06,
FW_CAPS_CONFIG_CMD = 0x07,
FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
FW_IQ_CMD = 0x10,
FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
FW_VI_CMD = 0x14,
FW_VI_MAC_CMD = 0x15,
FW_VI_RXMODE_CMD = 0x16,
FW_VI_ENABLE_CMD = 0x17,
+ FW_VI_STATS_CMD = 0x1a,
FW_PORT_CMD = 0x1b,
FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
FW_DEBUG_CMD = 0x81,
};
+enum fw_cmd_cap {
+ FW_CMD_CAP_PORT = 0x04,
+};
+
/*
* Generic command header flit0
*/
@@ -238,6 +402,94 @@ struct fw_cmd_hdr {
#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
+/* address spaces
+ */
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_ctxtflush;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ __u8 access_ctl;
+ __u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ struct fw_ldst_pcie {
+ __u8 ctrl_to_fn;
+ __u8 bnum;
+ __u8 r;
+ __u8 ext_r;
+ __u8 select_naccess;
+ __u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
+ struct fw_ldst_i2c_deprecated {
+ __u8 pid_pkd;
+ __u8 base;
+ __u8 boffset;
+ __u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ __u8 pid;
+ __u8 did;
+ __u8 boffset;
+ __u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ __u8 val[33];
+ __u8 r11[7];
+ } le;
+ } u;
+};
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define M_FW_LDST_CMD_ADDRSPACE 0xff
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+
struct fw_reset_cmd {
__be32 op_to_write;
__be32 retval_len16;
@@ -386,6 +638,7 @@ struct fw_caps_config_cmd {
enum fw_params_mnem {
FW_PARAMS_MNEM_DEV = 1, /* device params */
FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
};
@@ -395,6 +648,12 @@ enum fw_params_mnem {
enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in khz */
FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
@@ -402,7 +661,12 @@ enum fw_params_param_dev {
* physical and virtual function parameters
*/
enum fw_params_param_pfvf {
- FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+ FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A
};
/*
@@ -443,6 +707,10 @@ enum fw_params_param_dmaq {
#define G_FW_PARAMS_PARAM_YZ(x) \
(((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
+#define S_FW_PARAMS_PARAM_XYZ 0
+#define M_FW_PARAMS_PARAM_XYZ 0xffffff
+#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ)
+
struct fw_params_cmd {
__be32 op_to_vfn;
__be32 retval_len16;
@@ -464,6 +732,74 @@ struct fw_params_cmd {
#define G_FW_PARAMS_CMD_VFN(x) \
(((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define S_FW_PFVF_CMD_PFN 8
+#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN)
+
+#define S_FW_PFVF_CMD_VFN 0
+#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN)
+
+#define S_FW_PFVF_CMD_NIQFLINT 20
+#define M_FW_PFVF_CMD_NIQFLINT 0xfff
+#define G_FW_PFVF_CMD_NIQFLINT(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
+
+#define S_FW_PFVF_CMD_NIQ 0
+#define M_FW_PFVF_CMD_NIQ 0xfffff
+#define G_FW_PFVF_CMD_NIQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
+
+#define S_FW_PFVF_CMD_PMASK 20
+#define M_FW_PFVF_CMD_PMASK 0xf
+#define G_FW_PFVF_CMD_PMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
+
+#define S_FW_PFVF_CMD_NEQ 0
+#define M_FW_PFVF_CMD_NEQ 0xfffff
+#define G_FW_PFVF_CMD_NEQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
+
+#define S_FW_PFVF_CMD_TC 24
+#define M_FW_PFVF_CMD_TC 0xff
+#define G_FW_PFVF_CMD_TC(x) \
+ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+
+#define S_FW_PFVF_CMD_NVI 16
+#define M_FW_PFVF_CMD_NVI 0xff
+#define G_FW_PFVF_CMD_NVI(x) \
+ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
+
+#define S_FW_PFVF_CMD_NEXACTF 0
+#define M_FW_PFVF_CMD_NEXACTF 0xffff
+#define G_FW_PFVF_CMD_NEXACTF(x) \
+ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
+
+#define S_FW_PFVF_CMD_R_CAPS 24
+#define M_FW_PFVF_CMD_R_CAPS 0xff
+#define G_FW_PFVF_CMD_R_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
+
+#define S_FW_PFVF_CMD_WX_CAPS 16
+#define M_FW_PFVF_CMD_WX_CAPS 0xff
+#define G_FW_PFVF_CMD_WX_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS)
+
+#define S_FW_PFVF_CMD_NETHCTRL 0
+#define M_FW_PFVF_CMD_NETHCTRL 0xffff
+#define G_FW_PFVF_CMD_NETHCTRL(x) \
+ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL)
+
/*
* ingress queue type; the first 1K ingress queues can have associated 0,
* 1 or 2 free lists and an interrupt, all other ingress queues lack these
@@ -473,6 +809,11 @@ enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
};
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_NIC = 1,
+ FW_IQ_IQTYPE_OFLD,
+};
+
struct fw_iq_cmd {
__be32 op_to_vfn;
__be32 alloc_to_len16;
@@ -606,6 +947,9 @@ struct fw_iq_cmd {
(((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+#define S_FW_IQ_CMD_IQTYPE 24
+#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)
+
#define S_FW_IQ_CMD_FL0CNGCHMAP 20
#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
@@ -724,6 +1068,11 @@ struct fw_eq_eth_cmd {
#define G_FW_EQ_ETH_CMD_EQID(x) \
(((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+#define S_FW_EQ_ETH_CMD_PHYSEQID 0
+#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
#define S_FW_EQ_ETH_CMD_FETCHRO 22
#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
@@ -786,6 +1135,75 @@ struct fw_eq_eth_cmd {
#define G_FW_EQ_ETH_CMD_VIID(x) \
(((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
enum fw_vi_func {
FW_VI_FUNC_ETH,
};
@@ -988,6 +1406,9 @@ struct fw_vi_enable_cmd {
(((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO)
#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U)
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+
/* VI PF stats offset definitions */
#define VI_PF_NUM_STATS 17
enum fw_vi_stats_pf_index {
@@ -1065,7 +1486,16 @@ struct fw_vi_stats_cmd {
} u;
};
-/* port capabilities bitmap */
+#define S_FW_VI_STATS_CMD_VIID 0
+#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID)
+
+#define S_FW_VI_STATS_CMD_NSTATS 12
+#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS)
+
+#define S_FW_VI_STATS_CMD_IX 0
+#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX)
+
+/* old 16-bit port capabilities bitmap */
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
@@ -1100,9 +1530,45 @@ enum fw_port_mdi {
#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */
+#define FW_PORT_CAP32_SPEED_100M 0x00000001UL
+#define FW_PORT_CAP32_SPEED_1G 0x00000002UL
+#define FW_PORT_CAP32_SPEED_10G 0x00000004UL
+#define FW_PORT_CAP32_SPEED_25G 0x00000008UL
+#define FW_PORT_CAP32_SPEED_40G 0x00000010UL
+#define FW_PORT_CAP32_SPEED_50G 0x00000020UL
+#define FW_PORT_CAP32_SPEED_100G 0x00000040UL
+#define FW_PORT_CAP32_FC_RX 0x00010000UL
+#define FW_PORT_CAP32_FC_TX 0x00020000UL
+#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL
+#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL
+#define FW_PORT_CAP32_ANEG 0x00100000UL
+#define FW_PORT_CAP32_MDIX 0x00200000UL
+#define FW_PORT_CAP32_MDIAUTO 0x00400000UL
+#define FW_PORT_CAP32_FEC_RS 0x00800000UL
+#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL
+
+#define S_FW_PORT_CAP32_SPEED 0
+#define M_FW_PORT_CAP32_SPEED 0xfff
+#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED)
+#define G_FW_PORT_CAP32_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED)
+
+enum fw_port_mdi32 {
+ FW_PORT_CAP32_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP32_MDI 21
+#define M_FW_PORT_CAP32_MDI 3
+#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI)
+#define G_FW_PORT_CAP32_MDI(x) \
+ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI)
+
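+/*
+ * Editor's illustrative sketch (not part of the upstream patch): a 32-bit
+ * capability word reported by firmware is tested against the FW_PORT_CAP32_*
+ * flags and decoded with the accessors above, e.g. (caps is a placeholder
+ * fw_port_cap32_t value):
+ *
+ *	if (caps & FW_PORT_CAP32_ANEG)
+ *		autoneg_supported = 1;
+ *	speed_bits = G_FW_PORT_CAP32_SPEED(caps);	// one bit per speed
+ */
+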
enum fw_port_action {
FW_PORT_ACTION_L1_CFG = 0x0001,
FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L1_CFG32 = 0x0009,
+ FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a,
};
struct fw_port_cmd {
@@ -1191,6 +1657,18 @@ struct fw_port_cmd {
__be64 r12;
} control;
} dcb;
+ struct fw_port_l1cfg32 {
+ __be32 rcap32;
+ __be32 r;
+ } l1cfg32;
+ struct fw_port_info32 {
+ __be32 lstatus32_to_cbllen32;
+ __be32 auxlinfo32_mtu32;
+ __be32 linkattr32;
+ __be32 pcaps32;
+ __be32 acaps32;
+ __be32 lpacaps32;
+ } info32;
} u;
};
@@ -1264,6 +1742,36 @@ struct fw_port_cmd {
#define G_FW_PORT_CMD_MODTYPE(x) \
(((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+#define S_FW_PORT_CMD_LSTATUS32 31
+#define M_FW_PORT_CMD_LSTATUS32 0x1
+#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32)
+#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U)
+
+#define S_FW_PORT_CMD_LINKDNRC32 28
+#define M_FW_PORT_CMD_LINKDNRC32 0x7
+#define G_FW_PORT_CMD_LINKDNRC32(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32)
+
+#define S_FW_PORT_CMD_MDIOCAP32 26
+#define M_FW_PORT_CMD_MDIOCAP32 0x1
+#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32)
+#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR32 21
+#define M_FW_PORT_CMD_MDIOADDR32 0x1f
+#define G_FW_PORT_CMD_MDIOADDR32(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32)
+
+#define S_FW_PORT_CMD_PORTTYPE32 13
+#define M_FW_PORT_CMD_PORTTYPE32 0xff
+#define G_FW_PORT_CMD_PORTTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)
+
+#define S_FW_PORT_CMD_MODTYPE32 8
+#define M_FW_PORT_CMD_MODTYPE32 0x1f
+#define G_FW_PORT_CMD_MODTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)
+
/*
* These are configured into the VPD and hence tools that generate
* VPD may use this enumeration.
@@ -1532,6 +2040,83 @@ struct fw_rss_ind_tbl_cmd {
#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
(((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_keymode;
+ __be32 synmapen_to_hashtoeplitz;
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28
+#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf
+#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
+ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
+
struct fw_rss_vi_config_cmd {
__be32 op_to_viid;
__be32 retval_len16;
@@ -1611,6 +2196,22 @@ struct fw_rss_vi_config_cmd {
(((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
/******************************************************************************
* D E B U G C O M M A N D s
******************************************************/
diff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 00000000..d96456bb
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait until reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's). Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ val);
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_entry entry;
+ unsigned int delay_idx;
+ u32 v, mbox_data;
+ const __be64 *p;
+ int i, ret;
+ int ms;
+
+ /* In T6, mailbox size is changed to 128 bytes to avoid
+ * invalidating the entire prefetch buffer.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+
+ if (v != X_MBOWNER_PL) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ return ret;
+ }
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+
+ t4_read_reg(adapter, mbox_data); /* flush write */
+ t4_write_reg(adapter, mbox_ctl,
+ F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & F_MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+ * We return the (negated) firmware command return
+ * code (this depends on FW_SUCCESS == 0). (Again we
+ * avoid clogging the log with FW_VI_STATS_CMD
+ * reply results.)
+ */
+
+ /*
+ * Retrieve the command reply and release the mailbox.
+ */
+ get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+
+ /* return value in high-order host-endian word */
+ v = be64_to_cpu(cmd_rpl[0]);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & F_FW_CMD_REQUEST) == 0);
+ memcpy(rpl, cmd_rpl, size);
+ }
+ return -((int)G_FW_CMD_RETVAL(v));
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dev_err(adapter, "command %#x timed out\n",
+ *(const u8 *)cmd);
+ dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+ ret = -ETIMEDOUT;
+ return ret;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ F_FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules and set initial
+ * values for some adapter tunables.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ u32 pl_vf_rev;
+ int ret, ver;
+
+ ret = t4vf_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ unsigned int i;
+ size_t len16;
+ int ret;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = cpu_to_be32(*params++);
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ vpd_params->cclk = vals[0];
+ dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+ __func__, vpd_params->cclk);
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves the firmware and TP microcode versions.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ adapter->params.fw_vers = vals[0];
+ adapter->params.tp_vers = vals[1];
+
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+ return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ struct fw_params_param *p;
+ struct fw_params_cmd cmd;
+ unsigned int i;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for this, and hence the alignment for
+ * the next packet offset is the maximum of these two.
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
+ u32 sge_control2)
+{
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
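+
+/*
+ * Editor's note (illustrative, not part of the upstream patch): for example,
+ * with a 32-byte ingress padding boundary and a 64-byte ingress packing
+ * boundary on a T5/T6 part, t4vf_fl_pkt_align() returns max(32, 64) = 64,
+ * so free list buffers are handed to the hardware on 64-byte boundaries.
+ */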
+
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * cpu-endian format based on the RSS mode. We also do first level
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE
+ (be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu
+ (rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ rss->u.basicvirtual.ofdmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ rss->u.basicvirtual.tnlmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->params.vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+ vfres->niq = G_FW_PFVF_CMD_NIQ(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ vfres->pmask = G_FW_PFVF_CMD_PMASK(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = G_FW_PFVF_CMD_TC(word);
+ vfres->nvi = G_FW_PFVF_CMD_NVI(word);
+ vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
+ vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
+ vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats_fw - collect "port" statistics via Firmware
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface via Firmware
+ * commands.
+ */
+static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ unsigned int rem = VI_VF_NUM_STATS;
+ struct fw_vi_stats_vf fwstats;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) |
+ V_FW_VI_STATS_CMD_VIID(pi->viid) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
+ V_FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) +
+ be64_to_cpu(fwstats.tx_mcast_bytes) +
+ be64_to_cpu(fwstats.tx_ucast_bytes);
+ p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames);
+
+ p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+ p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ /*
+ * If this is not the first Virtual Interface for our Virtual
+ * Function, we need to use Firmware commands to retrieve its
+ * MPS statistics.
+ */
+ if (pidx != 0)
+ t4vf_get_port_stats_fw(adapter, pidx, p);
+
+ /*
+ * But for the first VI, we can grab its statistics via the MPS
+ * register mapped into the VF register space.
+ */
+#define GET_STAT(name) \
+ t4_read_reg64(adapter, \
+ T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
+ p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) +
+ GET_STAT(TX_VF_MCAST_BYTES) +
+ GET_STAT(TX_VF_UCAST_BYTES);
+ p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
+ p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
+ p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
+ p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES);
+
+ p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
+ p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
+ p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
+
+ p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES);
+#undef GET_STAT
+}
+
+static int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE |
+ F_FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ F_FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+ return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
+}
+
+int t4vf_port_init(struct adapter *adapter)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd port_cmd, port_rpl;
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ int mdio_addr;
+ int ret, i;
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ /*
+ * If we haven't yet determined if we're talking to Firmware
+ * which knows the new 32-bit Port Caps, it's time to find
+ * out now. This will also tell new Firmware to send us Port
+ * Status Updates using the new 32-bit Port Capabilities
+ * version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X
+ (FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+ val = 1;
+ ret = t4vf_set_params(adapter, 1, &param, &val);
+ fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
+ adapter->params.fw_caps_support = fw_caps;
+ }
+
+ ret = t4vf_alloc_vi(adapter, p->port_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot allocate VI for port %d:"
+ " err=%d\n", p->port_id, ret);
+ return ret;
+ }
+ p->viid = ret;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface
+ * information like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
+ ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ p->rss_size = G_FW_VI_CMD_RSSSIZE
+ (be16_to_cpu(vi_rpl.norss_rsssize));
+ t4_os_set_hw_addr(adapter, i, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're
+ * done now. Else, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32
+ (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(p->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32
+ (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_LEN16(port_cmd));
+ ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ /*
+ * Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus = be32_to_cpu
+ (port_rpl.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
+ -1);
+ pcaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.pcap));
+ acaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.acap));
+ } else {
+ u32 lstatus32 = be32_to_cpu
+ (port_rpl.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1);
+ pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
+ acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
+ }
+
+ p->port_type = port_type;
+ p->mdio_addr = mdio_addr;
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+ init_link_config(&p->link_cfg, pcaps, acaps);
+ }
+ return 0;
+}
diff --git a/drivers/net/cxgbe/base/t4vf_hw.h b/drivers/net/cxgbe/base/t4vf_hw.h
new file mode 100644
index 00000000..55e436e7
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4vf_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4VF_HW_H
+#define __T4VF_HW_H
+
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_CIM_BASE_ADDR 0x0300
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
+
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES
+#endif /* __T4VF_HW_H */
diff --git a/drivers/net/cxgbe/clip_tbl.c b/drivers/net/cxgbe/clip_tbl.c
new file mode 100644
index 00000000..5e4dc527
--- /dev/null
+++ b/drivers/net/cxgbe/clip_tbl.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "common.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a CLIP entry in HW with the associated IPv4/IPv6 address
+ */
+static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * Delete the CLIP entry in HW with the associated IPv4/IPv6 address
+ */
+static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * cxgbe_clip_release - Release associated CLIP entry
+ * @dev: rte_eth_dev pointer
+ * @ce: clip entry to release
+ *
+ * Drops the reference count and frees the CLIP table entry once no users
+ * remain.
+ */
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
+{
+ int ret;
+
+ t4_os_lock(&ce->lock);
+ if (rte_atomic32_dec_and_test(&ce->refcnt)) {
+ ret = clip6_release_mbox(dev, ce->addr);
+ if (ret)
+ dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
+ }
+ t4_os_unlock(&ce->lock);
+}
+
+/**
+ * find_or_alloc_clipe - Find/Allocate a free CLIP entry
+ * @c: CLIP table
+ * @lip: IPv4/IPv6 address to compare/add
+ * Returns pointer to the IPv4/IPv6 entry found/created
+ *
+ * Finds/Allocates a CLIP entry to be used for a filter rule.
+ */
+static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
+ const u32 *lip)
+{
+ struct clip_entry *end, *e;
+ struct clip_entry *first_free = NULL;
+ unsigned int clipt_size = c->clipt_size;
+
+ for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (memcmp(lip, e->addr, sizeof(e->addr)) == 0)
+ goto exists;
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto exists;
+ }
+
+ return NULL;
+
+exists:
+ return e;
+}
+
+static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
+ u32 *lip, u8 v6)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce;
+ int ret = 0;
+
+ if (!ctbl)
+ return NULL;
+
+ t4_os_write_lock(&ctbl->lock);
+ ce = find_or_alloc_clipe(ctbl, lip);
+ if (ce) {
+ t4_os_lock(&ce->lock);
+ if (!rte_atomic32_read(&ce->refcnt)) {
+ rte_memcpy(ce->addr, lip, sizeof(ce->addr));
+ if (v6) {
+ ce->type = FILTER_TYPE_IPV6;
+ rte_atomic32_set(&ce->refcnt, 1);
+ ret = clip6_get_mbox(dev, lip);
+ if (ret)
+ dev_debug(adap,
+ "CLIP FW ADD CMD failed: %d",
+ ret);
+ } else {
+ ce->type = FILTER_TYPE_IPV4;
+ }
+ } else {
+ rte_atomic32_inc(&ce->refcnt);
+ }
+ t4_os_unlock(&ce->lock);
+ }
+ t4_os_write_unlock(&ctbl->lock);
+
+ return ret ? NULL : ce;
+}
+
+/**
+ * cxgbe_clip_alloc - Allocate an IPv6 CLIP entry
+ * @dev: rte_eth_dev pointer
+ * @lip: IPv6 address to add
+ * Returns pointer to the CLIP entry created
+ *
+ * Allocates an IPv6 CLIP entry to be used for a filter rule.
+ */
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip)
+{
+ return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6);
+}
+
+/**
+ * Initialize CLIP Table
+ */
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end)
+{
+ unsigned int clipt_size;
+ struct clip_tbl *ctbl;
+ unsigned int i;
+
+ if (clipt_start >= clipt_end)
+ return NULL;
+
+ clipt_size = clipt_end - clipt_start + 1;
+
+ ctbl = t4_os_alloc(sizeof(*ctbl) +
+ clipt_size * sizeof(struct clip_entry));
+ if (!ctbl)
+ return NULL;
+
+ ctbl->clipt_start = clipt_start;
+ ctbl->clipt_size = clipt_size;
+
+ t4_os_rwlock_init(&ctbl->lock);
+
+ for (i = 0; i < ctbl->clipt_size; i++) {
+ t4_os_lock_init(&ctbl->cl_list[i].lock);
+ rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0);
+ }
+
+ return ctbl;
+}
+
+/**
+ * Cleanup CLIP Table
+ */
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+ if (adap->clipt)
+ t4_os_free(adap->clipt);
+}
diff --git a/drivers/net/cxgbe/clip_tbl.h b/drivers/net/cxgbe/clip_tbl.h
new file mode 100644
index 00000000..737ccc69
--- /dev/null
+++ b/drivers/net/cxgbe/clip_tbl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_CLIP_H_
+#define _CXGBE_CLIP_H_
+
+/*
+ * State for the corresponding entry of the HW CLIP table.
+ */
+struct clip_entry {
+ enum filter_type type; /* entry type */
+ u32 addr[4]; /* IPV4 or IPV6 address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct clip_tbl {
+ unsigned int clipt_start; /* start index of CLIP table */
+ unsigned int clipt_size; /* size of CLIP table */
+ rte_rwlock_t lock; /* table rw lock */
+ struct clip_entry cl_list[0]; /* MUST BE LAST */
+};
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end);
+void t4_cleanup_clip_tbl(struct adapter *adap);
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip);
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce);
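+
+/*
+ * Editor's illustrative sketch (not part of the upstream patch): a caller
+ * such as the filter code is expected to pin the IPv6 address of a rule for
+ * as long as the rule is active (dev and ipv6_addr are placeholders):
+ *
+ *	struct clip_entry *ce;
+ *
+ *	ce = cxgbe_clip_alloc(dev, (u32 *)ipv6_addr);
+ *	if (!ce)
+ *		return -ENOMEM;
+ *	...	// program the filter rule referencing the pinned address
+ *	cxgbe_clip_release(dev, ce);
+ */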
+#endif /* _CXGBE_CLIP_H_ */
diff --git a/drivers/net/cxgbe/cxgbe.h b/drivers/net/cxgbe/cxgbe.h
index f9891545..5e6f5c98 100644
--- a/drivers/net/cxgbe/cxgbe.h
+++ b/drivers/net/cxgbe/cxgbe.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _CXGBE_H_
@@ -46,20 +18,51 @@
#define CXGBE_MIN_RX_BUFSIZE ETHER_MIN_MTU /* min buf size */
#define CXGBE_MAX_RX_PKTLEN (9000 + ETHER_HDR_LEN + ETHER_CRC_LEN) /* max pkt */
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */
+#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
+
+#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
+#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+#define CXGBE_DEVARG_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_FORCE_LINK_UP "force_link_up"
+
+bool force_linkup(struct adapter *adap);
int cxgbe_probe(struct adapter *adapter);
+int cxgbevf_probe(struct adapter *adapter);
void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
+int cxgbe_set_link_status(struct port_info *pi, bool status);
int cxgbe_up(struct adapter *adap);
int cxgbe_down(struct port_info *pi);
void cxgbe_close(struct adapter *adapter);
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
void cxgbe_stats_reset(struct port_info *pi);
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c);
int link_start(struct port_info *pi);
void init_rspq(struct adapter *adap, struct sge_rspq *q, unsigned int us,
unsigned int cnt, unsigned int size, unsigned int iqe_size);
int setup_sge_fwevtq(struct adapter *adapter);
+int setup_sge_ctrl_txq(struct adapter *adapter);
void cfg_queues(struct rte_eth_dev *eth_dev);
int cfg_queue_count(struct rte_eth_dev *eth_dev);
+int init_rss(struct adapter *adap);
int setup_rss(struct port_info *pi);
void cxgbe_enable_rx_queues(struct port_info *pi);
+void print_port_info(struct adapter *adap);
+void print_adapter_info(struct adapter *adap);
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key);
+void configure_max_ethqsets(struct adapter *adapter);
#endif /* _CXGBE_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index 03bba9fe..5d47c5f3 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#ifndef _CXGBE_COMPAT_H_
@@ -226,15 +198,6 @@ static inline int cxgbe_fls(int x)
return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
}
-/**
- * cxgbe_ffs - find first bit set
- * @x: the word to search
- */
-static inline int cxgbe_ffs(int x)
-{
- return x ? __builtin_ffs(x) : 0;
-}
-
static inline unsigned long ilog2(unsigned long n)
{
unsigned int e = 0;
@@ -278,4 +241,16 @@ static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
rte_write32_relaxed(val, addr);
}
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+#define mult_frac(x, numer, denom)( \
+{ \
+ typeof(x) quot = (x) / (denom); \
+ typeof(x) rem = (x) % (denom); \
+ (quot * (numer)) + ((rem * (numer)) / (denom)); \
+} \
+)
+
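+/*
+ * Editor's illustrative sketch (not part of the upstream patch): mult_frac()
+ * avoids the intermediate overflow of x * numer when the final result still
+ * fits in the type, e.g.:
+ *
+ *	// 4000000000U * 3 would overflow 32 bits, mult_frac() stays in range
+ *	u32 scaled = mult_frac(4000000000U, 3U, 4U);	// 3000000000U
+ */
+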
#endif /* _CXGBE_COMPAT_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 5cd260f4..4dcad7a2 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -63,6 +35,8 @@
#include <rte_dev.h>
#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+#include "cxgbe_flow.h"
/*
* Macros needed to support the PCI Device ID Table ...
@@ -85,8 +59,21 @@
*/
#include "t4_pci_id_tbl.h"
-static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT |\
+ DEV_TX_OFFLOAD_IPV4_CKSUM |\
+ DEV_TX_OFFLOAD_UDP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_CKSUM |\
+ DEV_TX_OFFLOAD_TCP_TSO)
+
+#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP |\
+ DEV_RX_OFFLOAD_CRC_STRIP |\
+ DEV_RX_OFFLOAD_IPV4_CKSUM |\
+ DEV_RX_OFFLOAD_JUMBO_FRAME |\
+ DEV_RX_OFFLOAD_UDP_CKSUM |\
+ DEV_RX_OFFLOAD_TCP_CKSUM)
+
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
uint16_t pkts_sent, pkts_remain;
@@ -119,8 +106,8 @@ static uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return total_sent;
}
-static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
unsigned int work_done;
@@ -135,8 +122,8 @@ static uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return work_done;
}
-static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
- struct rte_eth_dev_info *device_info)
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -148,8 +135,6 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
- device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
device_info->max_rx_queues = max_queues;
@@ -159,25 +144,22 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->max_vfs = adapter->params.arch.vfcount;
device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
- device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ device_info->rx_queue_offload_capa = 0UL;
+ device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
- device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ device_info->tx_queue_offload_capa = 0UL;
+ device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
device_info->reta_size = pi->rss_size;
+ device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
+ device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
cxgbe_get_speed_caps(pi, &device_info->speed_capa);
}
-static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -186,7 +168,7 @@ static void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
1, -1, 1, -1, false);
}
-static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -195,7 +177,7 @@ static void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
0, -1, 1, -1, false);
}
-static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -206,7 +188,7 @@ static void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
-1, 1, 1, -1, false);
}
-static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -217,28 +199,91 @@ static void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
-1, 0, 1, -1, false);
}
-static int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
- __rte_unused int wait_to_complete)
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct rte_eth_link *old_link = &eth_dev->data->dev_link;
- unsigned int work_done, budget = 4;
+ struct rte_eth_link new_link = { 0 };
+ unsigned int i, work_done, budget = 32;
+ u8 old_link = pi->link_cfg.link_ok;
+
+ for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* Exit if link status changed or always forced up */
+ if (pi->link_cfg.link_ok != old_link || force_linkup(adapter))
+ break;
+
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+ }
+
+ new_link.link_status = force_linkup(adapter) ?
+ ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_autoneg = pi->link_cfg.autoneg;
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_speed = pi->link_cfg.speed;
+
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
+}
+
+/**
+ * Set device link up.
+ */
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+ /* Flush all link events */
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
- if (old_link->link_status == pi->link_cfg.link_ok)
- return -1; /* link not changed */
- eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;
+ /* If link already up, nothing to do */
+ if (pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, true);
+ if (ret)
+ return ret;
- /* link has changed */
+ cxgbe_dev_link_update(dev, 1);
return 0;
}
-static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+/**
+ * Set device link down.
+ */
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already down, nothing to do */
+ if (!pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, false);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 0);
+ return 0;
+}
+
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -254,9 +299,11 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
/* set to jumbo mode if needed */
if (new_mtu > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
-1, -1, true);
@@ -266,21 +313,13 @@ static int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
return err;
}
-static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id);
-static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id);
-static void cxgbe_dev_tx_queue_release(void *q);
-static void cxgbe_dev_rx_queue_release(void *q);
-
/*
* Stop device.
*/
-static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
- int i, dev_down = 0;
CXGBE_FUNC_TRACE();
@@ -294,28 +333,12 @@ static void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
* have been disabled
*/
t4_sge_eth_clear_queues(pi);
-
- /* See if all ports are down */
- for_each_port(adapter, i) {
- pi = adap2pinfo(adapter, i);
- /*
- * Skip first port of the adapter since it will be closed
- * by DPDK
- */
- if (i == 0)
- continue;
- dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
- }
-
- /* If rest of the ports are stopped, then free up resources */
- if (dev_down == (adapter->params.nports - 1))
- cxgbe_close(adapter);
}
/* Start the device.
* It returns 0 on success.
*/
-static int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -367,7 +390,7 @@ out:
/*
* Stop device: disable rx and tx functions to allow for reconfiguring.
*/
-static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -386,19 +409,35 @@ static void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
t4_sge_eth_clear_queues(pi);
}
-static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
+ uint64_t configured_offloads;
int err;
CXGBE_FUNC_TRACE();
+ configured_offloads = eth_dev->data->dev_conf.rxmode.offloads;
+
+ /* The KEEP_CRC offload flag is not supported by the PMD.
+ * This block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(configured_offloads)) {
+ dev_info(adapter, "can't disable hw crc strip\n");
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_CRC_STRIP;
+ }
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = setup_sge_fwevtq(adapter);
if (err)
return err;
adapter->flags |= FW_QUEUE_BOUND;
+ if (is_pf4(adapter)) {
+ err = setup_sge_ctrl_txq(adapter);
+ if (err)
+ return err;
+ }
}
err = cfg_queue_count(eth_dev);
@@ -408,8 +447,7 @@ static int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
return 0;
}
-static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id)
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
int ret;
struct sge_eth_txq *txq = (struct sge_eth_txq *)
@@ -424,8 +462,7 @@ static int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
- uint16_t tx_queue_id)
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
int ret;
struct sge_eth_txq *txq = (struct sge_eth_txq *)
@@ -440,10 +477,10 @@ static int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf)
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -452,8 +489,6 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
int err = 0;
unsigned int temp_nb_desc;
- RTE_SET_USED(tx_conf);
-
dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
__func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
socket_id, pi->first_qset);
@@ -488,13 +523,12 @@ static int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
s->fw_evtq.cntxt_id, socket_id);
- dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
- __func__, txq->q.cntxt_id, err);
-
+ dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
+ __func__, txq->q.cntxt_id, txq->q.abs_id, err);
return err;
}
-static void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(void *q)
{
struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
@@ -510,8 +544,7 @@ static void cxgbe_dev_tx_queue_release(void *q)
}
}
-static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
- uint16_t rx_queue_id)
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
int ret;
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -530,8 +563,7 @@ static int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
- uint16_t rx_queue_id)
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
int ret;
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
@@ -549,11 +581,11 @@ static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
return ret;
}
-static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
- uint16_t queue_idx, uint16_t nb_desc,
- unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
{
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
struct adapter *adapter = pi->adapter;
@@ -565,8 +597,6 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info dev_info;
unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
- RTE_SET_USED(rx_conf);
-
dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
socket_id, mp);
@@ -613,21 +643,25 @@ static int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
/* Set to jumbo mode if necessary */
if (pkt_len > ETHER_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, t4_ethrx_handler,
- t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
+ is_pf4(adapter) ?
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
queue_idx, socket_id);
- dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
- __func__, err, pi->port_id, rxq->rspq.cntxt_id);
+ dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
+ __func__, err, pi->port_id, rxq->rspq.cntxt_id,
+ rxq->rspq.abs_id);
return err;
}
-static void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(void *q)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
struct sge_rspq *rq = &rxq->rspq;
@@ -750,7 +784,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct adapter *adapter = pi->adapter;
struct link_config *lc = &pi->link_cfg;
- if (lc->supported & FW_PORT_CAP_ANEG) {
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
if (fc_conf->autoneg)
lc->requested_fc |= PAUSE_AUTONEG;
else
@@ -773,7 +807,7 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
&pi->link_cfg);
}
-static const uint32_t *
+const uint32_t *
cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
{
static const uint32_t ptypes[] = {
@@ -787,6 +821,88 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
+/* Update RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
+ if (err)
+ return err;
+
+ pi->rss_hf = rss_conf->rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = cpu_to_be32(key[i]);
+
+ t4_write_rss_key(adapter, mod_key, -1);
+ }
+
+ return 0;
+}
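
For reference, the 40-byte RSS key supplied by the application is programmed into hardware as ten 32-bit big-endian words in reverse word order, and read back the same way in the conf_get path below. A minimal standalone sketch of that transform (the helper name is illustrative, and htonl() stands in for the driver's cpu_to_be32()):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* htonl() stands in for cpu_to_be32() */

/* Reorder a 40-byte RSS key into the word order the hardware expects. */
static void rss_key_to_hw_order(const uint8_t *rss_key, uint32_t mod_key[10])
{
        uint32_t key[10];
        int i, j;

        memcpy(key, rss_key, 40);           /* CXGBE_DEFAULT_RSS_KEY_LEN */
        for (i = 9, j = 0; i >= 0; i--, j++)
                mod_key[j] = htonl(key[i]); /* reverse words, force big endian */
}
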
+
+/* Get RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ u64 rss_hf = 0;
+ u64 flags = 0;
+ int err;
+
+ err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ &flags, NULL);
+
+ if (err)
+ return err;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
+ rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
+
+ rss_conf->rss_hf = rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ t4_read_rss_key(adapter, key);
+
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = be32_to_cpu(key[i]);
+
+ memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+ }
+
+ return 0;
+}
+
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
@@ -956,6 +1072,23 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
return 0;
}
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ ret = t4_change_mac(adapter, adapter->mbox, pi->viid,
+ pi->xact_addr_filt, (u8 *)addr, true, true);
+ if (ret < 0) {
+ dev_err(adapter, "failed to set mac addr; err = %d\n",
+ ret);
+ return ret;
+ }
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_start = cxgbe_dev_start,
.dev_stop = cxgbe_dev_stop,
@@ -968,6 +1101,8 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_infos_get = cxgbe_dev_info_get,
.dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
.link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
.mtu_set = cxgbe_dev_mtu_set,
.tx_queue_setup = cxgbe_dev_tx_queue_setup,
.tx_queue_start = cxgbe_dev_tx_queue_start,
@@ -977,6 +1112,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.rx_queue_start = cxgbe_dev_rx_queue_start,
.rx_queue_stop = cxgbe_dev_rx_queue_stop,
.rx_queue_release = cxgbe_dev_rx_queue_release,
+ .filter_ctrl = cxgbe_dev_filter_ctrl,
.stats_get = cxgbe_dev_stats_get,
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,
@@ -985,6 +1121,9 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.get_eeprom = cxgbe_get_eeprom,
.set_eeprom = cxgbe_set_eeprom,
.get_reg = cxgbe_get_regs,
+ .rss_hash_update = cxgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
};
/*
@@ -1004,14 +1143,34 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &cxgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- /* for secondary processes, we don't initialise any further as primary
- * has already done this work.
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
return 0;
-
- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ }
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1043,6 +1202,16 @@ out_free_adapter:
return err;
}
+static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
@@ -1052,7 +1221,7 @@ static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
}
static struct rte_pci_driver rte_cxgbe_pmd = {
@@ -1065,3 +1234,6 @@ static struct rte_pci_driver rte_cxgbe_pmd = {
RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+ CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
new file mode 100644
index 00000000..7f0d3800
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -0,0 +1,1252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include <rte_net.h>
+#include "common.h"
+#include "t4_tcb.h"
+#include "t4_regs.h"
+#include "cxgbe_filter.h"
+#include "clip_tbl.h"
+
+/**
+ * Initialize Hash Filters
+ */
+int init_hash_filter(struct adapter *adap)
+{
+ unsigned int n_user_filters;
+ unsigned int user_filter_perc;
+ int ret;
+ u32 params[7], val[7];
+
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ return ret;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ user_filter_perc = 100;
+ n_user_filters = mult_frac(adap->tids.nftids,
+ user_filter_perc,
+ 100);
+
+ adap->tids.nftids = n_user_filters;
+ adap->params.hash_filter = 1;
+ return 0;
+}
+
+/**
+ * Validate if the requested filter specification can be set by checking
+ * if the requested features have been enabled
+ */
+int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
+{
+ u32 fconf;
+
+ /*
+ * Check for unconfigured fields being used.
+ */
+ fconf = adapter->params.tp.vlan_pri_map;
+
+#define S(_field) \
+ (fs->val._field || fs->mask._field)
+#define U(_mask, _field) \
+ (!(fconf & (_mask)) && S(_field))
+
+ if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
+ return -EOPNOTSUPP;
+
+#undef S
+#undef U
+
+ /*
+ * If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /*
+ * Don't allow various trivially obvious bogus out-of-range
+ * values ...
+ */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ return 0;
+}
+
+/**
+ * Get the queue to which the traffic must be steered.
+ */
+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int iq;
+
+ /*
+ * If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ iq = 0;
+ } else {
+ /*
+ * If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->n_rx_qsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * Send CPL_SET_TCB_FIELD message
+ */
+static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
+ u16 word, u64 mask, u64 val, int no_reply)
+{
+ struct rte_mbuf *mbuf;
+ struct cpl_set_tcb_field *req;
+ struct sge_ctrl_txq *ctrlq;
+
+ ctrlq = &adapter->sge.ctrlq[0];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ WARN_ON(!mbuf);
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ memset(req, 0, sizeof(*req));
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
+ req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
+ V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
+ V_NO_REPLY(no_reply));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+}
+
+/**
+ * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
+ */
+static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
+ struct cpl_set_tcb_field *req,
+ unsigned int word,
+ u64 mask, u64 val, u8 cookie,
+ int no_reply)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
+ req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
+ V_QUEUENO(0));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Check if entry already filled.
+ */
+bool is_filter_set(struct tid_info *t, int fidx, int family)
+{
+ bool result = FALSE;
+ int i, max;
+
+ /* IPv6 requires four slots and IPv4 requires only one slot.
+ * Ensure there are enough slots available.
+ */
+ max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
+
+ t4_os_lock(&t->ftid_lock);
+ for (i = fidx; i <= max; i++) {
+ if (rte_bitmap_get(t->ftid_bmap, i)) {
+ result = TRUE;
+ break;
+ }
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return result;
+}
+
+/**
+ * Allocate an available free filter entry.
+ */
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
+{
+ struct tid_info *t = &adap->tids;
+ int pos;
+ int size = t->nftids;
+
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV6)
+ pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
+ else
+ pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
+ t4_os_unlock(&t->ftid_lock);
+
+ return pos < size ? pos : -1;
+}
+
+/**
+ * Construct hash filter ntuple.
+ */
+static u64 hash_filter_ntuple(const struct filter_entry *f)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+ u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0) {
+ if (!f->fs.val.proto)
+ ntuple |= (u64)tcp_proto << tp->protocol_shift;
+ else
+ ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
+ }
+
+ if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
+ ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+
+ if (ntuple != tp->hash_filter_mask)
+ return 0;
+
+ return ntuple;
+}
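
To illustrate the packing above, here is a standalone sketch with made-up shift values (the real port_shift, protocol_shift and ethertype_shift are derived from the adapter's TP_VLAN_PRI_MAP configuration); the resulting ntuple is only usable for the hash region when it equals tp->hash_filter_mask:

#include <stdint.h>

/* Hypothetical shifts; the driver derives the real ones from TP config. */
#define PORT_SHIFT       0
#define PROTOCOL_SHIFT   3
#define ETHERTYPE_SHIFT 11

static uint64_t pack_ntuple(uint8_t iport, uint8_t proto, uint16_t ethtype)
{
        uint64_t ntuple = 0;

        ntuple |= (uint64_t)iport << PORT_SHIFT;
        ntuple |= (uint64_t)proto << PROTOCOL_SHIFT;
        ntuple |= (uint64_t)ethtype << ETHERTYPE_SHIFT;
        return ntuple;
}
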
+
+/**
+ * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_req) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ abort_req->rsvd0 = cpu_to_be32(0);
+ abort_req->rsvd1 = 0;
+ abort_req->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_rpl) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ abort_rpl->rsvd0 = cpu_to_be32(0);
+ abort_rpl->rsvd1 = 0;
+ abort_rpl->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_rpl + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Delete the specified hash filter.
+ */
+static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
+ unsigned int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ if (filter_id > adapter->tids.ntids)
+ return -E2BIG;
+
+ f = lookup_tid(t, filter_id);
+ if (!f) {
+ dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ unsigned int wrlen;
+ struct rte_mbuf *mbuf;
+ struct work_request_hdr *wr;
+ struct ulptx_idata *aligner;
+ struct cpl_set_tcb_field *req;
+ struct cpl_abort_req *abort_req;
+ struct cpl_abort_rpl *abort_rpl;
+
+ f->ctx = ctx;
+ f->pending = 1;
+
+ wrlen = cxgbe_roundup(sizeof(*wr) +
+ (sizeof(*req) + sizeof(*aligner)) +
+ sizeof(*abort_req) + sizeof(*abort_rpl),
+ 16);
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ dev_err(adapter, "%s: could not allocate mbuf\n",
+ __func__);
+ goto out_err;
+ }
+
+ mbuf->data_len = wrlen;
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ INIT_ULPTX_WR(req, wrlen, 0, 0);
+ wr = (struct work_request_hdr *)req;
+ wr++;
+ req = (struct cpl_set_tcb_field *)wr;
+ mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
+ V_TCB_RSS_INFO(M_TCB_RSS_INFO),
+ V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
+ 0, 1);
+ aligner = (struct ulptx_idata *)(req + 1);
+ abort_req = (struct cpl_abort_req *)(aligner + 1);
+ mk_abort_req_ulp(abort_req, f->tid);
+ abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
+ mk_abort_rpl_ulp(abort_rpl, f->tid);
+ t4_mgmt_tx(ctrlq, mbuf);
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
+ */
+static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req6 *req = NULL;
+ u64 local_lo, local_hi, peer_lo, peer_hi;
+ u32 *lip = (u32 *)f->fs.val.lip;
+ u32 *fip = (u32 *)f->fs.val.fip;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ local_hi = ((u64)lip[1]) << 32 | lip[0];
+ local_lo = ((u64)lip[3]) << 32 | lip[2];
+ peer_hi = ((u64)fip[1]) << 32 | fip[0];
+ peer_lo = ((u64)fip[3]) << 32 | fip[2];
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip_hi = local_hi;
+ req->local_ip_lo = local_lo;
+ req->peer_ip_hi = peer_hi;
+ req->peer_ip_lo = peer_lo;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
+ */
+static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req *req = NULL;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+ f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+ req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+ f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Set the specified hash filter.
+ */
+static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct rte_mbuf *mbuf;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int iq;
+ int atid, size;
+ int ret = 0;
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+
+ ctrlq = &adapter->sge.ctrlq[pi->port_id];
+
+ f = t4_os_alloc(sizeof(*f));
+ if (!f) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ f->fs = *fs;
+ f->ctx = ctx;
+ f->dev = dev;
+ f->fs.iq = iq;
+
+ atid = cxgbe_alloc_atid(t, f);
+ if (atid < 0)
+ goto out_err;
+
+ if (f->fs.type) {
+ /* IPv6 hash filter */
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_atid;
+
+ size = sizeof(struct cpl_t6_act_open_req6);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_clip;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req6(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ } else {
+ /* IPv4 hash filter */
+ size = sizeof(struct cpl_t6_act_open_req);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_atid;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ }
+
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+free_clip:
+ cxgbe_clip_release(f->dev, f->clipt);
+free_atid:
+ cxgbe_free_atid(t, atid);
+
+out_err:
+ t4_os_free(f);
+ return ret;
+}
+
+/**
+ * Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+void clear_filter(struct filter_entry *f)
+{
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ /*
+ * The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+/**
+ * Create FW work request to delete the filter at a specified index
+ */
+static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+}
+
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
+ memset(fwr, 0, sizeof(*fwr));
+
+ /*
+ * Construct the work request to set the filter.
+ */
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio));
+ fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
+ fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
+ fwr->smac_sel = 0;
+ fwr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
+ ));
+ fwr->maci_to_matchtypem =
+ cpu_to_be32(V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = cpu_to_be16(f->fs.val.lport);
+ fwr->lpm = cpu_to_be16(f->fs.mask.lport);
+ fwr->fp = cpu_to_be16(f->fs.val.fport);
+ fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+out:
+ return ret;
+}
+
+/**
+ * Set the corresponding entry in the bitmap. 4 slots are
+ * marked for IPv6, whereas only 1 slot is marked for IPv4.
+ */
+static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (rte_bitmap_get(t->ftid_bmap, fidx)) {
+ t4_os_unlock(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_set(t->ftid_bmap, fidx);
+ rte_bitmap_set(t->ftid_bmap, fidx + 1);
+ rte_bitmap_set(t->ftid_bmap, fidx + 2);
+ rte_bitmap_set(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return 0;
+}
+
+/**
+ * Clear the corresponding entry in the bitmap. 4 slots are
+ * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
+ */
+static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
+{
+ t4_os_lock(&t->ftid_lock);
+ if (family == FILTER_TYPE_IPV4) {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ } else {
+ rte_bitmap_clear(t->ftid_bmap, fidx);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 1);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 2);
+ rte_bitmap_clear(t->ftid_bmap, fidx + 3);
+ }
+ t4_os_unlock(&t->ftid_lock);
+}
+
+/**
+ * Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_del_hash_filter(dev, filter_id, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (!ret) {
+ dev_warn(adapter, "%s: could not find filter entry: %u\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure filter id is aligned on the 2 slot boundary for T6,
+ * and 4 slot boundary for cards below T6.
+ */
+ if (fs->type) {
+ if (chip_ver < CHELSIO_T6)
+ filter_id &= ~(0x3);
+ else
+ filter_id &= ~(0x1);
+ }
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgbe_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ f->fs.type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return del_filter_wr(dev, filter_id);
+ }
+
+ /*
+ * If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+
+ return 0;
+}
+
+/**
+ * Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int fidx, iq, fid_bit = 0;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ uint8_t bitoff[16] = {0};
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_set_hash_filter(dev, fs, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ /*
+ * Ensure filter id is aligned on the 4 slot boundary for IPv6
+ * maskfull filters.
+ */
+ if (fs->type)
+ filter_id &= ~(0x3);
+
+ ret = is_filter_set(&adapter->tids, filter_id, fs->type);
+ if (ret)
+ return -EBUSY;
+
+ iq = get_filter_steerq(dev, fs);
+
+ /*
+ * IPv6 filters occupy four slots and must be aligned on four-slot
+ * boundaries for T5. On T6, IPv6 filters occupy two-slots and
+ * must be aligned on two-slot boundaries.
+ *
+ * IPv4 filters only occupy a single slot and have no alignment
+ * requirements but writing a new IPv4 filter into the middle
+ * of an existing IPv6 filter requires clearing the old IPv6
+ * filter.
+ */
+ if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
+ /*
+ * For T6, if our IPv4 filter isn't being written to an
+ * even filter index and there's an IPv6 filter at the
+ * 2-aligned base slot, then that IPv6 filter needs to be
+ * deleted first ...
+ * For adapters below T6, an IPv6 filter occupies 4 entries.
+ */
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
+ if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ } else { /* IPv6 */
+ unsigned int max_filter_id;
+
+ if (chip_ver < CHELSIO_T6) {
+ /*
+ * Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 4;
+ } else {
+ /*
+ * For T6, with CLIP enabled, an IPv6 filter occupies
+ * 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+
+ max_filter_id = filter_id + 2;
+ }
+
+ /*
+ * Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid)
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = adapter->tids.ftid_base + filter_id;
+ fid_bit = filter_id;
+ ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
+ if (ret)
+ return ret;
+
+ /*
+ * Check to make sure the filter requested is writable ...
+ */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ return ret;
+ }
+
+ /*
+ * Allocate a CLIP table entry only if we have a non-zero IPv6 address.
+ */
+ if (chip_ver > CHELSIO_T5 && fs->type &&
+ memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_tid;
+ }
+
+ /*
+ * Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ /*
+ * Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(dev, filter_id);
+ if (ret) {
+ fid_bit = f->tid - adapter->tids.ftid_base;
+ goto free_tid;
+ }
+
+ return ret;
+
+free_tid:
+ cxgbe_clear_ftid(&adapter->tids, fid_bit,
+ fs->type ? FILTER_TYPE_IPV6 :
+ FILTER_TYPE_IPV4);
+ clear_filter(f);
+ return ret;
+}
+
+/**
+ * Handle a Hash filter write reply.
+ */
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int ftid = G_TID_TID(G_AOPEN_ATID
+ (be32_to_cpu(rpl->atid_status)));
+ unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+ f = lookup_atid(t, ftid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %d\n",
+ __func__, ftid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ switch (status) {
+ case CPL_ERR_NONE: {
+ f->tid = tid;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+
+ cxgbe_insert_tid(t, f, f->tid, 0);
+ cxgbe_free_atid(t, ftid);
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ if (f->fs.hitcnts)
+ set_tcb_field(adap, tid,
+ W_TCB_TIMESTAMP,
+ V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
+ V_TCB_T_RTT_TS_RECENT_AGE
+ (M_TCB_T_RTT_TS_RECENT_AGE),
+ V_TCB_TIMESTAMP(0ULL) |
+ V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
+ 1);
+ break;
+ }
+ default:
+ dev_warn(adap, "%s: filter creation failed with status = %u\n",
+ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+ ctx->result = -EAGAIN;
+ else
+ ctx->result = -EINVAL;
+ }
+
+ cxgbe_free_atid(t, ftid);
+ t4_os_free(f);
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+}
+
+/**
+ * Handle a LE-TCAM filter write/deletion reply.
+ */
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ struct filter_entry *f = NULL;
+ unsigned int tid = GET_TID(rpl);
+ int idx, max_fidx = adap->tids.nftids;
+
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+ if (idx >= max_fidx)
+ return;
+
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = G_COOKIE(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /*
+ * Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ } else if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /*
+ * Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(f);
+ if (ctx)
+ ctx->result = 0;
+ } else {
+ /*
+ * Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_warn(adap, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+ }
+}
+
+/*
+ * Retrieve the packet or byte count for the specified filter.
+ */
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte)
+{
+ struct filter_entry *f;
+ unsigned int tcb_base, tcbaddr;
+ int ret;
+
+ tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx < adapter->tids.ntids) {
+ f = adapter->tids.tid_tab[fidx];
+ if (!f)
+ return -EINVAL;
+
+ if (is_t5(adapter->params.chip)) {
+ *c = 0;
+ return 0;
+ }
+ tcbaddr = tcb_base + (fidx * TCB_SIZE);
+ goto get_count;
+ } else {
+ return -ERANGE;
+ }
+ } else {
+ if (fidx >= adapter->tids.nftids)
+ return -ERANGE;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+ tcbaddr = tcb_base + f->tid * TCB_SIZE;
+ }
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+get_count:
+ if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
+ /*
+ * For T5, the Filter Packet Hit Count is maintained as a
+ * 32-bit Big Endian value in the TCB field {timestamp}.
+ * Instead of the filter hit count showing up at offset 20
+ * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
+ * up at offset 24. Whacky.
+ */
+ if (get_byte) {
+ unsigned int word_offset = 4;
+ __be64 be64_byte_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be64_byte_count),
+ &be64_byte_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = be64_to_cpu(be64_byte_count);
+ } else {
+ unsigned int word_offset = 6;
+ __be32 be32_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be32_count), &be32_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = (u64)be32_to_cpu(be32_count);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Handle a Hash filter delete reply.
+ */
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+
+ f = lookup_tid(t, tid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, tid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ f->valid = 0;
+
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ cxgbe_remove_tid(t, 0, tid, 0);
+ t4_os_free(f);
+
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+}
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
new file mode 100644
index 00000000..af8fa752
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_FILTER_H_
+#define _CXGBE_FILTER_H_
+
+#include "t4_msg.h"
+/*
+ * Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 8
+#define VF_BITWIDTH 8
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/*
+ * Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ */
+struct ch_filter_tuple {
+ /*
+ * Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /*
+ * Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+
+ /* reservations for future additions */
+ uint8_t rsvd[12];
+};
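
To make the (value, mask) semantics above concrete: a field matches its tuple when (field & mask) == value, so a /24 IPv4 subnet and a full wildcard are both expressible. A tiny standalone illustration (not part of the driver):

#include <assert.h>
#include <stdint.h>

/* A field matches its (value, mask) tuple when (field & mask) == value. */
static int tuple_matches(uint32_t field, uint32_t value, uint32_t mask)
{
        return (field & mask) == value;
}

int main(void)
{
        /* 192.168.1.0/24 expressed as a (value, mask) tuple */
        assert(tuple_matches(0xC0A80137, 0xC0A80100, 0xFFFFFF00));  /* 192.168.1.55 */
        assert(!tuple_matches(0xC0A80237, 0xC0A80100, 0xFFFFFF00)); /* 192.168.2.55 */
        assert(tuple_matches(0x12345678, 0x00000000, 0x00000000));  /* wildcard (0, 0) */
        return 0;
}
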
+
+/*
+ * Filter specification
+ */
+struct ch_filter_specification {
+ /* Administrative fields for filter. */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /*
+ * Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+ uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */
+
+ /*
+ * Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t eport:2; /* egress port to switch packet out */
+
+ /* Filter rule value/mask pairs. */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
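
As a hedged usage sketch (not taken from the driver), a caller wanting to drop all IPv4 TCP traffic to local port 80 arriving on ingress port 0 could fill the specification roughly as follows; the helper name is illustrative, and IPPROTO_TCP and memset() come from <netinet/in.h> and <string.h>:

/* Illustrative only: build a spec that drops TCP dst-port 80 on ingress port 0. */
static void example_fill_drop_port80(struct ch_filter_specification *fs)
{
        memset(fs, 0, sizeof(*fs));
        fs->type = 0;                 /* IPv4 */
        fs->cap = 0;                  /* LE-TCAM region; 1 would request the hash region */
        fs->action = FILTER_DROP;
        fs->hitcnts = 1;              /* count hits in the TCB */
        fs->val.proto = IPPROTO_TCP;
        fs->mask.proto = 0xff;
        fs->val.lport = 80;
        fs->mask.lport = 0xffff;
        fs->val.iport = 0;
        fs->mask.iport = 0x7;
        /* The spec would then be passed to cxgbe_set_filter() with a filter_ctx. */
}
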
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum filter_type {
+ FILTER_TYPE_IPV4 = 0,
+ FILTER_TYPE_IPV6,
+};
+
+struct t4_completion {
+ unsigned int done; /* completion done (0 - No, 1 - Yes) */
+ rte_spinlock_t lock; /* completion lock */
+};
+
+/*
+ * Filter operation context to allow callers to wait for
+ * an asynchronous completion.
+ */
+struct filter_ctx {
+ struct t4_completion completion; /* completion rendezvous */
+ int result; /* result of operation */
+ u32 tid; /* to store tid of hash filter */
+};
+
+/*
+ * Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command.
+ */
+struct filter_entry {
+ /*
+ * Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+ u32 pending:1; /* filter action is pending FW reply */
+ struct filter_ctx *ctx; /* caller's completion hook */
+ struct clip_entry *clipt; /* CLIP Table entry for IPv6 */
+ struct rte_eth_dev *dev; /* Port's rte eth device */
+ void *private; /* For use by apps using filter_entry */
+
+ /* This will store the actual tid */
+ u32 tid;
+
+ /*
+ * The filter itself.
+ */
+ struct ch_filter_specification fs;
+};
+
+#define FILTER_ID_MAX (~0U)
+
+struct tid_info;
+struct adapter;
+
+/**
+ * Find first clear bit in the bitmap.
+ */
+static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,
+ unsigned int size)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < size; idx++)
+ if (!rte_bitmap_get(bmap, idx))
+ break;
+
+ return idx;
+}
+
+/**
+ * Find a free region of 'num' consecutive entries.
+ */
+static inline unsigned int
+cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,
+ unsigned int num)
+{
+ unsigned int idx, j, free = 0;
+
+ if (num > size)
+ return size;
+
+ for (idx = 0; idx < size; idx += num) {
+ for (j = 0; j < num; j++) {
+ if (!rte_bitmap_get(bmap, idx + j)) {
+ free++;
+ } else {
+ free = 0;
+ break;
+ }
+ }
+
+ /* Found the Region */
+ if (free == num)
+ break;
+
+ /* Reached the end and still no region found */
+ if ((idx + num) > size) {
+ idx = size;
+ break;
+ }
+ }
+
+ return idx;
+}
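
Worked example of the search above: the outer loop steps by num, so only num-aligned offsets are probed, and on failure the function returns size. A standalone mock using a plain array in place of rte_bitmap:

#include <stdio.h>

/* Mock of the num-aligned free-region search, with a plain busy[] array. */
static unsigned int find_free_region(const int *busy, unsigned int size,
                                     unsigned int num)
{
        unsigned int idx, j, free;

        for (idx = 0; idx + num <= size; idx += num) {
                for (j = 0, free = 0; j < num; j++) {
                        if (busy[idx + j])
                                break;
                        free++;
                }
                if (free == num)
                        return idx;
        }
        return size;    /* no region found */
}

int main(void)
{
        int busy[8] = { 0, 0, 1, 0, 0, 0, 0, 0 };

        /* Slot 2 is busy, so the block at 0 fails; the 4-aligned block at 4 wins. */
        printf("region starts at %u\n", find_free_region(busy, 8, 4));
        return 0;
}
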
+
+bool is_filter_set(struct tid_info *, int fidx, int family);
+void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+void clear_filter(struct filter_entry *f);
+int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx);
+int writable_filter(struct filter_entry *f);
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);
+int init_hash_filter(struct adapter *adap);
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
+void hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl);
+int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte);
+#endif /* _CXGBE_FILTER_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
new file mode 100644
index 00000000..01c945f1
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -0,0 +1,845 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include "common.h"
+#include "cxgbe_flow.h"
+
+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
+do { \
+ if (!((fs)->val.elem || (fs)->mask.elem)) { \
+ (fs)->val.elem = (__v); \
+ (fs)->mask.elem = (__m); \
+ } else { \
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
+ NULL, "a filter can be specified" \
+ " only once"); \
+ } \
+} while (0)
+
+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
+do { \
+ memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
+ memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
+} while (0)
+
+#define CXGBE_FILL_FS(v, m, elem) \
+ __CXGBE_FILL_FS(v, m, fs, elem, e)
+
+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
+ __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
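
For readability, CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto), as used in the parse helpers below, expands to roughly the following: the (value, mask) pair is filled once, and a second attempt to set the same field is rejected.

/* Rough expansion of CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto): */
if (!(fs->val.proto || fs->mask.proto)) {
        fs->val.proto = IPPROTO_UDP;
        fs->mask.proto = 0xff;
} else {
        return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                  NULL,
                                  "a filter can be specified only once");
}
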
+
+static int
+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
+{
+ /* rte_flow specification does not allow it. */
+ if (!i->spec && (i->mask || i->last))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last or mask given without spec");
+ /*
+ * We don't support it.
+ * Although, we can support values in last as 0's or last == spec.
+ * But this will not provide user with any additional functionality
+ * and will only increase the complexity for us.
+ */
+ if (i->last)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last is not supported by chelsio pmd");
+ return 0;
+}
+
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u64 ntuple_mask = 0;
+
+ fs->cap = 0;
+
+ if (!is_hashfilter(adap))
+ return;
+
+ if (fs->type) {
+ uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff};
+ uint8_t bitoff[16] = {0};
+
+ if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, biton, sizeof(biton)))
+ return;
+ } else {
+ uint32_t biton = 0xffffffff;
+ uint32_t bitoff = 0x0U;
+
+ if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, &biton, sizeof(biton)))
+ return;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return;
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return;
+
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return;
+
+ fs->cap = 1; /* use hash region */
+}
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_phy_port *val = item->spec;
+ const struct rte_flow_item_phy_port *umask = item->mask;
+ const struct rte_flow_item_phy_port *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+ if (val->index > 0x7)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+					      "port index up to 0x7 is supported");
+
+ CXGBE_FILL_FS(val->index, mask->index, iport);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_udp *val = item->spec;
+ const struct rte_flow_item_udp *umask = item->mask;
+ const struct rte_flow_item_udp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
+
+ if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "udp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_tcp *val = item->spec;
+ const struct rte_flow_item_tcp *umask = item->mask;
+ const struct rte_flow_item_tcp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
+
+ if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+ mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
+ mask->hdr.tcp_urp)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tcp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv4 *val = item->spec;
+ const struct rte_flow_item_ipv4 *umask = item->mask;
+ const struct rte_flow_item_ipv4 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
+
+ if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "ttl/tos are not supported");
+
+ fs->type = FILTER_TYPE_IPV4;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv4 wild card */
+
+ CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv6 *val = item->spec;
+ const struct rte_flow_item_ipv6 *umask = item->mask;
+ const struct rte_flow_item_ipv6 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
+
+ if (mask->hdr.vtc_flow ||
+ mask->hdr.payload_len || mask->hdr.hop_limits)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tc/flow/hop are not supported");
+
+ fs->type = FILTER_TYPE_IPV6;
+ CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+ if (!val)
+ return 0; /* ipv6 wild card */
+
+ CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
+ struct rte_flow_error *e)
+{
+ if (attr->egress)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "attribute:<egress> is"
+ " not supported !");
+ if (attr->group > 0)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "group parameter is"
+ " not supported.");
+
+ flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
+
+ return 0;
+}
+
+static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+
+ if (rxq > pi->n_rx_qsets)
+ return -EINVAL;
+ return 0;
+}
+
+static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct ch_filter_specification fs = f->fs;
+
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "invalid flow index %d.\n", fidx);
+ return -EINVAL;
+ }
+ if (!is_filter_set(&adap->tids, fidx, fs.type)) {
+ dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
+ struct adapter *adap, unsigned int fidx)
+{
+ if (is_filter_set(&adap->tids, fidx, fs->type)) {
+ dev_err(adap, "filter index: %d is busy.\n", fidx);
+ return -EBUSY;
+ }
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "filter index (%u) >= max(%u)\n",
+ fidx, adap->tids.nftids);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
+{
+ if (flow->fs.cap)
+ return 0; /* Hash filters */
+ return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
+ cxgbe_validate_fidxonadd(&flow->fs,
+ ethdev2adap(flow->dev), fidx);
+}
+
+static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(flow->dev);
+
+	/* For TCAM, get the next available slot if the default value was specified */
+ if (flow->fidx == FILTER_ID_MAX) {
+ int idx;
+
+ idx = cxgbe_alloc_ftid(adap, fs->type);
+ if (idx < 0) {
+ dev_err(adap, "unable to get a filter index in tcam\n");
+ return -ENOMEM;
+ }
+ *fidx = (unsigned int)idx;
+ } else {
+ *fidx = flow->fidx;
+ }
+
+ return 0;
+}
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_action_phy_port *port;
+
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ port = (const struct rte_flow_action_phy_port *)a->conf;
+ fs->eport = port->index;
+ break;
+ default:
+ /* We are not supposed to come here */
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Action not supported");
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_actions(struct rte_flow *flow,
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ const struct rte_flow_action_queue *q;
+ const struct rte_flow_action *a;
+ char abit = 0;
+ int ret;
+
+ for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ q = (const struct rte_flow_action_queue *)a->conf;
+ if (!q)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "specify rx queue index");
+ if (check_rxq(flow->dev, q->index))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "Invalid rx queue");
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_PASS;
+ fs->dirsteer = 1;
+ fs->iq = q->index;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ fs->hitcnts = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ /* We allow multiple switch actions, but switch is
+ * not compatible with either queue or drop
+ */
+ if (abit++ && fs->action != FILTER_SWITCH)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "overlapping action specified");
+ ret = ch_rte_parse_atype_switch(a, fs, e);
+ if (ret)
+ return ret;
+ fs->action = FILTER_SWITCH;
+ break;
+ default:
+ /* Not supported action : return error */
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ a, "Action not supported");
+ }
+ }
+
+ return 0;
+}
+
+struct chrte_fparse parseitem[] = {
+ [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+ .fptr = ch_rte_parsetype_port,
+ .dmask = &(const struct rte_flow_item_phy_port){
+ .index = 0x7,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .fptr = ch_rte_parsetype_ipv4,
+ .dmask = &rte_flow_item_ipv4_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .fptr = ch_rte_parsetype_ipv6,
+ .dmask = &rte_flow_item_ipv6_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .fptr = ch_rte_parsetype_udp,
+ .dmask = &rte_flow_item_udp_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .fptr = ch_rte_parsetype_tcp,
+ .dmask = &rte_flow_item_tcp_mask,
+ },
+};
+
+static int
+cxgbe_rtef_parse_items(struct rte_flow *flow,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ const struct rte_flow_item *i;
+ char repeat[ARRAY_SIZE(parseitem)] = {0};
+
+ for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
+ struct chrte_fparse *idx = &flow->item_parser[i->type];
+ int ret;
+
+		if (i->type >= ARRAY_SIZE(parseitem))
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "Item not supported");
+
+ switch (i->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ continue;
+ default:
+ /* check if item is repeated */
+ if (repeat[i->type])
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "parse items cannot be repeated (except void)");
+ repeat[i->type] = 1;
+
+ /* validate the item */
+ ret = cxgbe_validate_item(i, e);
+ if (ret)
+ return ret;
+
+ if (!idx || !idx->fptr) {
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "Item not supported");
+ } else {
+ ret = idx->fptr(idx->dmask, i, &flow->fs, e);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ cxgbe_fill_filter_region(adap, &flow->fs);
+
+ return 0;
+}
+
+static int
+cxgbe_flow_parse(struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ /* parse user request into ch_filter_specification */
+ ret = cxgbe_rtef_parse_attr(flow, attr, e);
+ if (ret)
+ return ret;
+ ret = cxgbe_rtef_parse_items(flow, item, e);
+ if (ret)
+ return ret;
+ return cxgbe_rtef_parse_actions(flow, action, e);
+}
+
+static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(dev);
+ struct tid_info *t = &adap->tids;
+ struct filter_ctx ctx;
+ unsigned int fidx;
+ int err;
+
+ if (cxgbe_get_fidx(flow, &fidx))
+ return -ENOMEM;
+ if (cxgbe_verify_fidx(flow, fidx, 0))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ /* go create the filter */
+ err = cxgbe_set_filter(dev, fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while creating filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter set operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while creating the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ if (fs->cap) { /* to destroy the filter */
+ flow->fidx = ctx.tid;
+ flow->f = lookup_tid(t, ctx.tid);
+ } else {
+ flow->fidx = fidx;
+ flow->f = &adap->tids.ftid_tab[fidx];
+ }
+
+ return 0;
+}
+
+static struct rte_flow *
+cxgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct rte_flow *flow;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow) {
+ rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to allocate memory for"
+ " filter_entry");
+ return NULL;
+ }
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ if (cxgbe_flow_parse(flow, attr, item, action, e)) {
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ /* go, interact with cxgbe_filter */
+ ret = __cxgbe_flow_create(dev, flow);
+ if (ret) {
+ rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to create flow rule");
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ flow->f->private = flow; /* Will be used during flush */
+
+ return flow;
+}
+
+static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct filter_entry *f = flow->f;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int err;
+
+ fs = &f->fs;
+ if (cxgbe_verify_fidx(flow, flow->fidx, 1))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while deleting filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter delete operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while deleting the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *e)
+{
+ int ret;
+
+ ret = __cxgbe_flow_destroy(dev, flow);
+ if (ret)
+ return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ flow, "error destroying filter.");
+ t4_os_free(flow);
+ return 0;
+}
+
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+ u64 *byte_count)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ struct ch_filter_specification fs = flow->f->fs;
+ unsigned int fidx = flow->fidx;
+ int ret = 0;
+
+ ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+ if (ret)
+ return ret;
+ return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification fs;
+ struct rte_flow_query_count *c;
+ struct filter_entry *f;
+ int ret;
+
+ RTE_SET_USED(dev);
+
+ f = flow->f;
+ fs = f->fs;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "only count supported for query");
+
+	/*
+	 * This is a valid operation, since we are allowed to perform
+	 * Chelsio-specific operations in the rte side of our code, but not
+	 * vice-versa.
+	 *
+	 * So, fs can be queried/modified here, BUT rte_flow_query_count
+	 * cannot be worked on by the lower layer, since we want to keep
+	 * it rte_flow agnostic.
+	 */
+ if (!fs.hitcnts)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ &fs, "filter hit counters were not"
+ " enabled during filter creation");
+
+ c = (struct rte_flow_query_count *)data;
+ ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+ if (ret)
+ return rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+ f, "cxgbe pmd failed to"
+ " perform query");
+
+ /* Query was successful */
+ c->bytes_set = 1;
+ c->hits_set = 1;
+
+ return 0; /* success / partial_success */
+}
+
+static int
+cxgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct rte_flow *flow;
+ unsigned int fidx;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow)
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Unable to allocate memory for filter_entry");
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ ret = cxgbe_flow_parse(flow, attr, item, action, e);
+ if (ret) {
+ t4_os_free(flow);
+ return ret;
+ }
+
+ if (validate_filter(adap, &flow->fs)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "validation failed. Check f/w config file.");
+ }
+
+ if (cxgbe_get_fidx(flow, &fidx)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no memory in tcam.");
+ }
+
+ if (cxgbe_verify_fidx(flow, fidx, 0)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "validation failed");
+ }
+
+ t4_os_free(flow);
+ return 0;
+}
+
+/*
+ * @ret : == 0 filter destroyed successfully
+ *         < 0 error destroying filter
+ *        == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev,
+ struct rte_flow_error *e)
+{
+ if (f && (f->valid || f->pending) &&
+ f->dev == dev && /* Only if user has asked for this port */
+ f->private) /* We (rte_flow) created this filter */
+ return cxgbe_flow_destroy(dev, (struct rte_flow *)f->private,
+ e);
+ return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ unsigned int i;
+ int ret = 0;
+
+ if (adap->tids.ftid_tab) {
+ struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+ for (i = 0; i < adap->tids.nftids; i++, f++) {
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+ if (is_hashfilter(adap) && adap->tids.tid_tab) {
+ struct filter_entry *f;
+
+		for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
+ f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+ ret = cxgbe_check_n_destroy(f, dev, e);
+ if (ret < 0)
+ goto out;
+ }
+ }
+
+out:
+ return ret >= 0 ? 0 : ret;
+}
+
+static const struct rte_flow_ops cxgbe_flow_ops = {
+ .validate = cxgbe_flow_validate,
+ .create = cxgbe_flow_create,
+ .destroy = cxgbe_flow_destroy,
+ .flush = cxgbe_flow_flush,
+ .query = cxgbe_flow_query,
+ .isolate = NULL,
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ RTE_SET_USED(dev);
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &cxgbe_flow_ops;
+ break;
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+ return ret;
+}
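
For reference, a sketch of application-side usage that the parser above can
offload (assumed code, not part of the patch): match IPv4/TCP packets with
destination port 80, count the hits and steer them to Rx queue 0. Only the
item and action types handled by parseitem[] and cxgbe_rtef_parse_actions()
are accepted; anything else is rejected via rte_flow_error_set().

#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
example_offload_http(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(80),
	};
	static const struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* IPv4 wildcard */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* NULL is returned on failure with details filled into 'err' */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
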
diff --git a/drivers/net/cxgbe/cxgbe_flow.h b/drivers/net/cxgbe/cxgbe_flow.h
new file mode 100644
index 00000000..0f750474
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_flow.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_FLOW_H_
+#define _CXGBE_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "cxgbe_filter.h"
+#include "cxgbe.h"
+
+#define CXGBE_FLOW_POLL_US 10
+#define CXGBE_FLOW_POLL_CNT 10
+
+struct chrte_fparse {
+ int (*fptr)(const void *mask, /* currently supported mask */
+ const struct rte_flow_item *item, /* user input */
+ struct ch_filter_specification *fs, /* where to parse */
+ struct rte_flow_error *e);
+ const void *dmask; /* Specify what is supported by chelsio by default*/
+};
+
+struct rte_flow {
+ struct filter_entry *f;
+ struct ch_filter_specification fs; /* temp, to create filter */
+ struct chrte_fparse *item_parser;
+ /*
+ * filter_entry doesn't store user priority.
+ * Post creation of filter this will indicate the
+ * flow index (fidx) for both hash and tcam filters
+ */
+ unsigned int fidx;
+ struct rte_eth_dev *dev;
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+#endif /* _CXGBE_FLOW_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 28db6c06..c3938e8d 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2017 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -57,14 +29,31 @@
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
-#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_kvargs.h>
#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+ rte_free(addr);
+}
/*
* Response queue handler for the FW event queue.
@@ -98,6 +87,18 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_fw6_msg *msg = (const void *)rsp;
t4_handle_fw_rpl(q->adapter, msg->data);
+ } else if (opcode == CPL_ABORT_RPL_RSS) {
+ const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+ hash_del_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+ filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_ACT_OPEN_RPL) {
+ const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+ hash_filter_rpl(q->adapter, p);
} else {
dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
opcode);
@@ -106,6 +107,79 @@ out:
return 0;
}
+/**
+ * Set up the SGE control queues used to pass control information.
+ */
+int setup_sge_ctrl_txq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0, i = 0;
+
+ for_each_port(adapter, i) {
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+ q->q.size = 1024;
+ err = t4_sge_alloc_ctrl_txq(adapter, q,
+ adapter->eth_dev, i,
+ s->fw_evtq.cntxt_id,
+ rte_socket_id());
+ if (err) {
+ dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+ err);
+ goto out;
+ }
+ snprintf(name, sizeof(name), "cxgbe_ctrl_pool_%d", i);
+ q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+ RTE_CACHE_LINE_SIZE,
+ RTE_MBUF_PRIV_ALIGN,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ SOCKET_ID_ANY);
+ if (!q->mb_pool) {
+ dev_err(adapter, "Can't create ctrl pool for port: %d",
+ i);
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ return 0;
+out:
+ t4_free_sge_resources(adapter);
+ return err;
+}
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @us: microseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until the completion is done or the poll
+ * count expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c)
+{
+ unsigned int i;
+ unsigned int work_done, budget = 4;
+
+ if (!c)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ cxgbe_poll(q, NULL, budget, &work_done);
+ t4_os_lock(&c->lock);
+ if (c->done) {
+ t4_os_unlock(&c->lock);
+ return 0;
+ }
+ t4_os_unlock(&c->lock);
+ udelay(us);
+ }
+ return -ETIMEDOUT;
+}
+
int setup_sge_fwevtq(struct adapter *adapter)
{
struct sge *s = &adapter->sge;
@@ -197,17 +271,186 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
return 0;
}
+/**
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ t4_os_lock(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ t4_os_unlock(&t->atid_lock);
+ return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ t4_os_lock(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ t4_os_unlock(&t->atid_lock);
+}
+
+/**
+ * Populate a TID_RELEASE WR. The caller must properly size the mbuf.
+ */
+static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
+ INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+/**
+ * Release a TID and inform the hardware. If we are unable to allocate the
+ * release message, the hardware notification is skipped.
+ */
+void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
+{
+ struct rte_mbuf *mbuf;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ rte_atomic32_dec(&t->conns_in_use);
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->tids_in_use);
+ }
+ }
+
+ mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
+ if (mbuf) {
+ mbuf->data_len = sizeof(struct cpl_tid_release);
+ mbuf->pkt_len = mbuf->data_len;
+ mk_tid_release(mbuf, tid);
+ t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
+ }
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family)
+{
+ t->tid_tab[tid] = data;
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->tids_in_use);
+ }
+
+ rte_atomic32_inc(&t->conns_in_use);
+}
+
+/**
+ * Free TID tables.
+ */
+static void tid_free(struct tid_info *t)
+{
+ if (t->tid_tab) {
+ if (t->ftid_bmap)
+ rte_bitmap_free(t->ftid_bmap);
+
+ if (t->ftid_bmap_array)
+ t4_os_free(t->ftid_bmap_array);
+
+ t4_os_free(t->tid_tab);
+ }
+
+ memset(t, 0, sizeof(struct tid_info));
+}
+
+/**
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int ftid_bmap_size;
+ unsigned int natids = t->natids;
+ unsigned int max_ftids = t->nftids;
+
+ ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ max_ftids * sizeof(*t->ftid_tab) +
+ natids * sizeof(*t->atid_tab);
+
+ t->tid_tab = t4_os_alloc(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+	t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
+ t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
+ if (!t->ftid_bmap_array) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ t4_os_lock_init(&t->atid_lock);
+ t4_os_lock_init(&t->ftid_lock);
+
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ rte_atomic32_init(&t->tids_in_use);
+ rte_atomic32_set(&t->tids_in_use, 0);
+ rte_atomic32_init(&t->conns_in_use);
+ rte_atomic32_set(&t->conns_in_use, 0);
+
+	/* Set up the free list for atid_tab. */
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+
+ t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
+ ftid_bmap_size);
+ if (!t->ftid_bmap) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static inline bool is_x_1g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0;
+ return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
{
unsigned int speeds, high_speeds;
- speeds = V_FW_PORT_CAP_SPEED(G_FW_PORT_CAP_SPEED(lc->supported));
- high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+ speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
+ high_speeds = speeds &
+ ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
return high_speeds != 0;
}
@@ -270,7 +513,7 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
* We default up to # of cores queues per 1G/10G port.
*/
if (nb_ports)
- q_per_port = (MAX_ETH_QSETS -
+ q_per_port = (s->max_ethqsets -
(adap->params.nports - nb_ports)) /
nb_ports;
@@ -294,8 +537,6 @@ void cfg_queues(struct rte_eth_dev *eth_dev)
qidx += pi->n_rx_qsets;
}
- s->max_ethqsets = qidx;
-
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
@@ -345,14 +586,17 @@ static void setup_memwin(struct adapter *adap)
MEMWIN_NIC));
}
-static int init_rss(struct adapter *adap)
+int init_rss(struct adapter *adap)
{
unsigned int i;
- int err;
- err = t4_init_rss_mode(adap, adap->mbox);
- if (err)
- return err;
+ if (is_pf4(adap)) {
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -360,6 +604,8 @@ static int init_rss(struct adapter *adap)
pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
if (!pi->rss)
return -ENOMEM;
+
+ pi->rss_hf = CXGBE_RSS_HF_ALL;
}
return 0;
}
@@ -367,7 +613,7 @@ static int init_rss(struct adapter *adap)
/**
* Dump basic information about the adapter.
*/
-static void print_adapter_info(struct adapter *adap)
+void print_adapter_info(struct adapter *adap)
{
/**
* Hardware/Firmware/etc. Version/Revision IDs.
@@ -375,27 +621,29 @@ static void print_adapter_info(struct adapter *adap)
t4_dump_version_info(adap);
}
-static void print_port_info(struct adapter *adap)
+void print_port_info(struct adapter *adap)
{
int i;
char buf[80];
struct rte_pci_addr *loc = &adap->pdev->addr;
for_each_port(adap, i) {
- const struct port_info *pi = &adap->port[i];
+ const struct port_info *pi = adap2pinfo(adap, i);
char *bufp = buf;
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
bufp += sprintf(bufp, "100M/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
bufp += sprintf(bufp, "1G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
bufp += sprintf(bufp, "10G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
bufp += sprintf(bufp, "25G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
bufp += sprintf(bufp, "40G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
+ bufp += sprintf(bufp, "50G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
@@ -412,6 +660,84 @@ static void print_port_info(struct adapter *adap)
}
}
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
+int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);
+
+ return 1;
+}
+
+static void configure_vlan_types(struct adapter *adapter)
+{
+ struct rte_pci_device *pdev = adapter->pdev;
+ int i;
+
+ for_each_port(adapter, i) {
+ /* OVLAN Type 0x88a8 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x88a8));
+ /* OVLAN Type 0x9100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x9100));
+ /* OVLAN Type 0x8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN2),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x8100));
+
+ /* IVLAN 0X8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
+ V_IVLAN_ETYPE(M_IVLAN_ETYPE),
+ V_IVLAN_ETYPE(0x8100));
+
+ t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN,
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_OVLAN_EN2 | F_IVLAN_EN);
+ }
+
+ if (cxgbe_get_devargs(pdev->device.devargs, CXGBE_DEVARG_KEEP_OVLAN))
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ V_RM_OVLAN(1), V_RM_OVLAN(0));
+}
+
static void configure_pcie_ext_tag(struct adapter *adapter)
{
u16 v;
@@ -442,6 +768,40 @@ static void configure_pcie_ext_tag(struct adapter *adapter)
}
}
+/* Figure out how many Queue Sets we can support */
+void configure_max_ethqsets(struct adapter *adapter)
+{
+ unsigned int ethqsets;
+
+ /*
+ * We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue.
+ *
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ if (is_pf4(adapter)) {
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ ethqsets = pfres->niqflint - 1;
+ if (pfres->neq < ethqsets * 2)
+ ethqsets = pfres->neq / 2;
+ } else {
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ ethqsets = vfres->niqflint - 1;
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets * 2)
+ ethqsets = vfres->neq / 2;
+ }
+
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+}
+
/*
* Tweak configuration based on system architecture, etc. Most of these have
* defaults assigned to them by Firmware Configuration Files (if we're using
@@ -580,8 +940,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* This will allow the firmware to optimize aspects of the hardware
* configuration which will result in improved performance.
*/
- caps_cmd.niccaps &= cpu_to_be16(~(FW_CAPS_CONFIG_NIC_HASHFILTER |
- FW_CAPS_CONFIG_NIC_ETHOFLD));
+ caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
caps_cmd.toecaps = 0;
caps_cmd.iscsicaps = 0;
caps_cmd.rdmacaps = 0;
@@ -648,6 +1007,7 @@ bye:
static int adap_init0(struct adapter *adap)
{
+ struct fw_caps_config_cmd caps_cmd;
int ret = 0;
u32 v, port_vec;
enum dev_state state;
@@ -723,6 +1083,17 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap->pdev_dev,
+ "Unable to retrieve resource provisioning info\n");
+ goto bye;
+ }
+
/* Find out what ports are available to us. */
v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
@@ -764,6 +1135,50 @@ static int adap_init0(struct adapter *adap)
V_FW_PARAMS_PARAM_Y(0) | \
V_FW_PARAMS_PARAM_Z(0))
+ params[0] = FW_PARAM_PFVF(FILTER_START);
+ params[1] = FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ftid_base = val[0];
+ adap->tids.nftids = val[1] - val[0] + 1;
+
+ params[0] = FW_PARAM_PFVF(CLIP_START);
+ params[1] = FW_PARAM_PFVF(CLIP_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->clipt_start = val[0];
+ adap->clipt_end = val[1];
+
+ /*
+ * Get device capabilities so we can determine what resources we need
+ * to manage.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
+ is_t6(adap->params.chip)) {
+ if (init_hash_filter(adap) < 0)
+ goto bye;
+ }
+
+ /* query tid-related parameters */
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
/* If we're running on newer firmware, let it know that we're
* prepared to deal with encapsulated CPL messages. Older
* firmware won't understand this and we'll just get
@@ -828,6 +1243,8 @@ static int adap_init0(struct adapter *adap)
t4_init_sge_params(adap);
t4_init_tp_params(adap);
configure_pcie_ext_tag(adap);
+ configure_vlan_types(adap);
+ configure_max_ethqsets(adap);
adap->params.drv_memwin = MEMWIN_NIC;
adap->flags |= FW_OK;
@@ -860,7 +1277,7 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
};
- const struct port_info *pi = &adap->port[port_id];
+ const struct port_info *pi = adap2pinfo(adap, port_id);
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
@@ -881,6 +1298,18 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
pi->port_id, pi->mod_type);
}
+inline bool force_linkup(struct adapter *adap)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+
+ if (is_pf4(adap))
+		return false; /* force_linkup not required for pf driver */
+ if (!cxgbe_get_devargs(pdev->device.devargs,
+ CXGBE_DEVARG_FORCE_LINK_UP))
+ return false;
+ return true;
+}
+
/**
* link_start - enable a port
* @dev: the port to enable
@@ -912,7 +1341,7 @@ int link_start(struct port_info *pi)
ret = 0;
}
}
- if (ret == 0)
+ if (ret == 0 && is_pf4(adapter))
ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
&pi->link_cfg);
if (ret == 0) {
@@ -926,18 +1355,80 @@ int link_start(struct port_info *pi)
ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
true, true, false);
}
+
+ if (ret == 0 && force_linkup(adapter))
+ pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
return ret;
}
/**
- * cxgb4_write_rss - write the RSS table for a given port
+ * cxgbe_write_rss_conf - write the RSS hash configuration for a given port
+ * @pi: the port
+ * @rss_hf: Hash configuration to apply
+ */
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
+{
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ u64 flags = 0;
+ u16 rss;
+ int err;
+
+ /* Should never be called before setting up sge eth rx queues */
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+		dev_err(adapter, "%s: No RXQs available on port %d\n",
+ __func__, pi->port_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow unsupported hash functions */
+ if (rss_hf & ~CXGBE_RSS_HF_ALL)
+ return -EINVAL;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rxq[0].rspq.abs_id;
+
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ flags, rss);
+ return err;
+}
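
For context, a sketch of how the rss_hf value consumed above is typically
requested by an application (assumed code, not part of the patch; whether the
PMD copies dev_conf's rss_hf into pi->rss_hf or keeps the CXGBE_RSS_HF_ALL
default set in init_rss() is decided elsewhere in the driver). Only hash
flags within CXGBE_RSS_HF_ALL pass the validation check at the top of
cxgbe_write_rss_conf().

#include <rte_ethdev.h>

static int example_configure_rss(uint16_t port_id, uint16_t nrxq,
				 uint16_t ntxq)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				/* 4-tuple hashing for IPv4 TCP and UDP */
				.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP |
					  ETH_RSS_NONFRAG_IPV4_UDP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nrxq, ntxq, &conf);
}
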
+
+/**
+ * cxgbe_write_rss - write the RSS table for a given port
* @pi: the port
* @queues: array of queue indices for RSS
*
* Sets up the portion of the HW RSS table for the port's VI to distribute
* packets to the Rx queues in @queues.
*/
-int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
{
u16 *rss;
int i, err;
@@ -958,20 +1449,6 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
- /*
- * If Tunnel All Lookup isn't specified in the global RSS
- * Configuration, then we need to specify a default Ingress
- * Queue for any ingress packets which aren't hashed. We'll
- * use our first ingress queue ...
- */
- if (!err)
- err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
- F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN |
- F_FW_RSS_VI_CONFIG_CMD_UDPEN,
- rss[0]);
rte_free(rss);
return err;
}
@@ -1001,7 +1478,11 @@ int setup_rss(struct port_info *pi)
for (j = 0; j < pi->rss_size; j++)
pi->rss[j] = j % pi->n_rx_qsets;
- err = cxgb4_write_rss(pi, pi->rss);
+ err = cxgbe_write_rss(pi, pi->rss);
+ if (err)
+ return err;
+
+ err = cxgbe_write_rss_conf(pi, pi->rss_hf);
if (err)
return err;
pi->flags |= PORT_RSS_DONE;
@@ -1016,7 +1497,8 @@ int setup_rss(struct port_info *pi)
static void enable_rx(struct adapter *adap, struct sge_rspq *q)
{
/* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+ t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
V_SEINTARM(q->intr_params) |
V_INGRESSQID(q->cntxt_id));
}
@@ -1051,7 +1533,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
#define FW_CAPS_TO_SPEED(__fw_name) \
do { \
- if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
SET_SPEED(__fw_name); \
} while (0)
@@ -1106,6 +1588,7 @@ static void fw_caps_to_speed_caps(enum fw_port_type port_type,
case FW_PORT_TYPE_CR4_QSFP:
FW_CAPS_TO_SPEED(SPEED_25G);
FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_50G);
FW_CAPS_TO_SPEED(SPEED_100G);
break;
@@ -1128,14 +1611,38 @@ void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
{
*speed_caps = 0;
- fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.supported,
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
speed_caps);
- if (!(pi->link_cfg.supported & FW_PORT_CAP_ANEG))
+ if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
*speed_caps |= ETH_LINK_SPEED_FIXED;
}
/**
+ * cxgbe_set_link_status - Set device link up or down.
+ * @pi: Underlying port's info
+ * @status: 0 - down, 1 - up
+ *
+ * Set the device link up or down.
+ */
+int cxgbe_set_link_status(struct port_info *pi, bool status)
+{
+ struct adapter *adapter = pi->adapter;
+ int err = 0;
+
+ err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+ if (err) {
+ dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
+ return err;
+ }
+
+ if (!status)
+ t4_reset_link_config(adapter, pi->pidx);
+
+ return 0;
+}
+
+/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
*
@@ -1147,7 +1654,8 @@ int cxgbe_up(struct adapter *adap)
{
enable_rx(adap, &adap->sge.fw_evtq);
t4_sge_tx_monitor_start(adap);
- t4_intr_enable(adap);
+ if (is_pf4(adap))
+ t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
/* TODO: deadman watchdog ?? */
@@ -1159,17 +1667,7 @@ int cxgbe_up(struct adapter *adap)
*/
int cxgbe_down(struct port_info *pi)
{
- struct adapter *adapter = pi->adapter;
- int err = 0;
-
- err = t4_enable_vi(adapter, adapter->mbox, pi->viid, false, false);
- if (err) {
- dev_err(adapter, "%s: disable_vi failed: %d\n", __func__, err);
- return err;
- }
-
- t4_reset_link_config(adapter, pi->port_id);
- return 0;
+ return cxgbe_set_link_status(pi, false);
}
/*
@@ -1181,7 +1679,10 @@ void cxgbe_close(struct adapter *adapter)
int i;
if (adapter->flags & FULL_INIT_DONE) {
- t4_intr_disable(adapter);
+ if (is_pf4(adapter))
+ t4_intr_disable(adapter);
+ tid_free(&adapter->tids);
+ t4_cleanup_clip_tbl(adapter);
t4_sge_tx_monitor_stop(adapter);
t4_free_sge_resources(adapter);
for_each_port(adapter, i) {
@@ -1190,11 +1691,16 @@ void cxgbe_close(struct adapter *adapter)
t4_free_vi(adapter, adapter->mbox,
adapter->pf, 0, pi->viid);
rte_free(pi->eth_dev->data->mac_addrs);
+ /* Skip first port since it'll be freed by DPDK stack */
+ if (i) {
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
}
adapter->flags &= ~FULL_INIT_DONE;
}
- if (adapter->flags & FW_OK)
+ if (is_pf4(adapter) && (adapter->flags & FW_OK))
t4_fw_bye(adapter, adapter->mbox);
}
@@ -1220,6 +1726,7 @@ int cxgbe_probe(struct adapter *adapter)
t4_os_lock_init(&adapter->mbox_lock);
TAILQ_INIT(&adapter->mbox_list);
+ t4_os_lock_init(&adapter->win0_lock);
err = t4_prep_adapter(adapter);
if (err)
@@ -1265,21 +1772,16 @@ int cxgbe_probe(struct adapter *adapter)
}
for_each_port(adapter, i) {
- char name[RTE_ETH_NAME_MAX_LEN];
- struct rte_eth_dev_data *data = NULL;
const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
- pi = &adapter->port[i];
- pi->adapter = adapter;
- pi->xact_addr_filt = -1;
- pi->port_id = i;
-
- snprintf(name, sizeof(name), "cxgbe%d",
- adapter->eth_dev->data->port_id + i);
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
if (i == 0) {
/* First port is already allocated by DPDK */
- pi->eth_dev = adapter->eth_dev;
+ eth_dev = adapter->eth_dev;
goto allocate_mac;
}
@@ -1289,21 +1791,26 @@ int cxgbe_probe(struct adapter *adapter)
*/
/* reserve an ethdev entry */
- pi->eth_dev = rte_eth_dev_allocate(name);
- if (!pi->eth_dev)
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
goto out_free;
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (!data)
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
goto out_free;
- data->port_id = adapter->eth_dev->data->port_id + i;
-
- pi->eth_dev->data = data;
-
allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ pi->pidx = i;
+
pi->eth_dev->device = &adapter->pdev->device;
- pi->eth_dev->data->dev_private = pi;
pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
@@ -1318,6 +1825,11 @@ allocate_mac:
err = -1;
goto out_free;
}
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
}
if (adapter->flags & FW_OK) {
@@ -1334,6 +1846,35 @@ allocate_mac:
print_adapter_info(adapter);
print_port_info(adapter);
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up some
+ * functionality
+ */
+ dev_warn(adapter, "could not allocate CLIP. Continuing\n");
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
+ /* Disable filtering support */
+ dev_warn(adapter, "could not allocate TID table, "
+ "filter support disabled. Continuing\n");
+ }
+
+ if (is_hashfilter(adapter)) {
+ if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
+ u32 hash_base, hash_reg;
+
+ hash_reg = A_LE_DB_TID_HASHBASE;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ }
+ } else {
+ /* Disable hash filtering support */
+ dev_warn(adapter,
+ "Maskless filter support disabled. Continuing\n");
+ }
+
err = init_rss(adapter);
if (err)
goto out_free;
@@ -1349,8 +1890,11 @@ out_free:
/* Skip first port since it'll be de-allocated by DPDK */
if (i == 0)
continue;
- if (pi->eth_dev->data)
- rte_free(pi->eth_dev->data);
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
}
if (adapter->flags & FW_OK)
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
new file mode 100644
index 00000000..50931ed0
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_ofld.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_OFLD_H_
+#define _CXGBE_OFLD_H_
+
+#include <rte_bitmap.h>
+
+#include "cxgbe_filter.h"
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \
+ V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = cpu_to_be32( \
+ V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \
+ V_FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the filter TID tables.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+ struct filter_entry *ftid_tab; /* Normal filters */
+ union aopen_entry *atid_tab;
+ struct rte_bitmap *ftid_bmap;
+ uint8_t *ftid_bmap_array;
+ unsigned int nftids, natids;
+ unsigned int ftid_base, hash_base;
+
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ /* TIDs in the TCAM */
+ rte_atomic32_t tids_in_use;
+ /* TIDs in the HASH */
+ rte_atomic32_t hash_tids_in_use;
+ rte_atomic32_t conns_in_use;
+
+ rte_spinlock_t atid_lock __rte_cache_aligned;
+ rte_spinlock_t ftid_lock;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+int cxgbe_alloc_atid(struct tid_info *t, void *data);
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
+void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family);
+
+#endif /* _CXGBE_OFLD_H_ */
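
A minimal sketch of how the active-open TID helpers above fit together
(assumed code, not part of the patch; 'my_ctx' stands in for whatever
per-connection state the caller tracks): allocate an atid, look it up later
to recover the context, and return the slot to the free list when done.

static void example_atid_cycle(struct tid_info *t, void *my_ctx)
{
	int atid = cxgbe_alloc_atid(t, my_ctx);	/* -1 when the pool is empty */

	if (atid < 0)
		return;

	/* ... issue the active-open request carrying 'atid' ... */

	if (lookup_atid(t, atid) == my_ctx)	/* data stored at allocation */
		cxgbe_free_atid(t, atid);	/* back onto the free list */
}
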
diff --git a/drivers/net/cxgbe/cxgbe_pfvf.h b/drivers/net/cxgbe/cxgbe_pfvf.h
new file mode 100644
index 00000000..8d0a105a
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbe_pfvf.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_PFVF_H_
+#define _CXGBE_PFVF_H_
+
+void cxgbe_dev_rx_queue_release(void *q);
+void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info);
+void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev);
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr);
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete);
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev);
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev);
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+#endif /* _CXGBE_PFVF_H_ */
diff --git a/drivers/net/cxgbe/cxgbevf_ethdev.c b/drivers/net/cxgbe/cxgbevf_ethdev.c
new file mode 100644
index 00000000..3b32ca9d
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ * ... and the PCI ID Table itself ...
+ */
+#include "t4_pci_id_tbl.h"
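For context, t4_pci_id_tbl.h is a list of CH_PCI_ID_TABLE_ENTRY() invocations bracketed by the BEGIN/END markers, so defining the macros above and then including the header emits the whole cxgb4vf_pci_tbl[] array in place. A minimal sketch of what the expansion looks like, with hypothetical device IDs standing in for the real Chelsio list:

static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
	/* hypothetical device IDs, for illustration only */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, 0x5800) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, 0x5801) },
	{ .vendor_id = 0, }	/* end-of-table sentinel */
};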
+
+/*
+ * Get port statistics.
+ */
+static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbevf_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->ierrors = ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
+ ps.tx_ucast_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_drop;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ eth_stats->q_errors[i] = txq->stats.mapping_err;
+ }
+ return 0;
+}
+
+static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .stats_get = cxgbevf_dev_stats_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct adapter *adapter = NULL;
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbevfadapter%d",
+ eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ err = cxgbevf_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
+ eth_cxgbevf_dev_init);
+}
+
+static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_cxgbevf_pmd = {
+ .id_table = cxgb4vf_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbevf_pci_probe,
+ .remove = eth_cxgbevf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
new file mode 100644
index 00000000..4214d031
--- /dev/null
+++ b/drivers/net/cxgbe/cxgbevf_main.c
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgbe.h"
+
+/*
+ * Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ unsigned int pmask_nports;
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+ " allowed virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We may have been provisioned with more VIs than the number of
+ * ports we're allowed to access (our Port Access Rights Mask).
+ * This is obviously a configuration conflict but we don't want to
+ * do anything silly just because of that.
+ */
+ pmask_nports = hweight32(adapter->params.vfres.pmask);
+ if (pmask_nports < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
+ " virtual interfaces; limited by Port Access Rights"
+ " mask %#x\n", pmask_nports, adapter->params.nports,
+ adapter->params.vfres.pmask);
+ adapter->params.nports = pmask_nports;
+ }
+
+ configure_max_ethqsets(adapter);
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+}
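The trimming above hinges on hweight32() being a 32-bit population count. A minimal stand-alone sketch of the same calculation (the helper name is hypothetical, not part of the driver), assuming hweight32() and u32 from the driver's compat header are in scope:

static unsigned int usable_ports(unsigned int nvi, u32 pmask)
{
	/* Ports we may actually touch, per the Port Access Rights Mask */
	unsigned int pmask_nports = hweight32(pmask);

	return pmask_nports < nvi ? pmask_nports : nvi;
}

For example, usable_ports(4, 0x5) returns 2: four VIs were provisioned, but only ports 0 and 2 are accessible.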
+
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4vf_get_port_stats(pi->adapter, pi->pidx, stats);
+}
+
+static int adap_init0vf(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int err;
+
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+
+ adapter->pf = t4vf_get_pf_from_vf(adapter);
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "error in sge init\n");
+ return err;
+ }
+
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
+ val = 1;
+ t4vf_set_params(adapter, 1, &param, &val);
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Check for various parameter sanity issues.
+ */
+ if (adapter->params.vfres.pmask == 0) {
+		dev_err(adapter->pdev_dev, "no port access configured/"
+			"usable!\n");
+ return -EINVAL;
+ }
+ if (adapter->params.vfres.nvi == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize nports and max_ethqsets now that we have our Virtual
+ * Function Resources.
+ */
+ size_nports_qsets(adapter);
+ adapter->flags |= FW_OK;
+ return 0;
+}
+
+int cxgbevf_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ unsigned int pmask;
+ int err = 0;
+ int i;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ err = t4vf_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ return err;
+ }
+ }
+
+ err = adap_init0vf(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int port_id;
+
+ if (pmask == 0)
+ break;
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+		if (!eth_dev->data->dev_private) {
+			err = -ENOMEM;
+			goto out_free;
+		}
+
+allocate_mac:
+ pi = (struct port_info *)eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = port_id;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4vf_port_init(adapter);
+ if (err) {
+			dev_err(adapter, "%s: t4vf_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cfg_queues(adapter->eth_dev);
+ print_adapter_info(adapter);
+ print_port_info(adapter);
+
+ err = init_rss(adapter);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ /* Skip first port since it'll be de-allocated by DPDK */
+ if (i == 0)
+ continue;
+ if (pi->eth_dev) {
+ if (pi->eth_dev->data->dev_private)
+ rte_free(pi->eth_dev->data->dev_private);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ }
+ return -err;
+}
diff --git a/drivers/net/cxgbe/meson.build b/drivers/net/cxgbe/meson.build
new file mode 100644
index 00000000..7c69a34b
--- /dev/null
+++ b/drivers/net/cxgbe/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('cxgbe_ethdev.c',
+ 'cxgbe_main.c',
+ 'cxgbevf_ethdev.c',
+ 'cxgbevf_main.c',
+ 'sge.c',
+ 'cxgbe_filter.c',
+ 'cxgbe_flow.c',
+ 'clip_tbl.c',
+ 'base/t4_hw.c',
+ 'base/t4vf_hw.c')
+includes += include_directories('base')
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 3d5aa596..4ea40d19 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1,34 +1,6 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
#include <sys/queue.h>
@@ -83,6 +55,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
#define MAX_IMM_TX_PKT_LEN 256
/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+/*
* Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
* per mbuf buffer). We currently only support two sizes for 1500- and
* 9000-byte MTUs. We could easily support more but there doesn't seem to be
@@ -337,7 +314,11 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(!q->bar2_addr)) {
- t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_KDOORBELL;
+
+ t4_write_reg_relaxed(adap, reg,
val | V_QID(q->cntxt_id));
} else {
writel_relaxed(val | V_QID(q->bar2_qid),
@@ -385,7 +366,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
- u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
@@ -570,12 +552,16 @@ static inline int is_eth_imm(const struct rte_mbuf *m)
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @m: the packet
+ * @adap: adapter structure pointer
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
+ struct adapter *adap)
{
+ size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
+ sizeof(struct fw_eth_tx_pkt_vm_wr);
unsigned int flits;
int hdrlen;
@@ -600,11 +586,10 @@ static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
*/
flits = sgl_len(m->nb_segs);
if (m->tso_segsz)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
+ flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
else
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ flits += (wr_size +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
return flits;
}
@@ -848,14 +833,20 @@ static void tx_timer_cb(void *data)
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
struct sge_eth_txq *txq)
{
- u32 wr_mid;
- struct sge_txq *q = &txq->q;
+ struct fw_eth_tx_pkts_vm_wr *vmwr;
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
struct fw_eth_tx_pkts_wr *wr;
+ struct sge_txq *q = &txq->q;
unsigned int ndesc;
+ u32 wr_mid;
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ vmwr = (void *)&q->desc[q->pidx];
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
ndesc = flits_to_desc(q->coalesce.flits);
@@ -863,12 +854,18 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
wr->plen = cpu_to_be16(q->coalesce.len);
wr->npkt = q->coalesce.idx;
wr->r3 = 0;
- wr->type = q->coalesce.type;
+ if (is_pf4(adap)) {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ wr->type = q->coalesce.type;
+ } else {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ vmwr->r4 = 0;
+ memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
+ fw_hdr_copy_len);
+ }
/* zero out coalesce structure members */
- q->coalesce.idx = 0;
- q->coalesce.flits = 0;
- q->coalesce.len = 0;
+ memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
txq_advance(q, ndesc);
txq->stats.coal_wr++;
@@ -896,13 +893,27 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
unsigned int *nflits,
struct adapter *adap)
{
+ struct fw_eth_tx_pkts_vm_wr *wr;
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits;
+ int credits, wr_size;
/* use coal WR type 1 when no frags are present */
type = (mbuf->nb_segs == 1) ? 1 : 0;
+ if (!is_pf4(adap)) {
+ if (!type)
+ return 0;
+
+ if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *),
+ fw_hdr_copy_len))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ }
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
@@ -948,16 +959,21 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
new:
/* start a new pkts WR, the WR header is not filled below */
- flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
+ wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
+ sizeof(struct fw_eth_tx_pkts_vm_wr);
+ flits += wr_size / sizeof(__be64);
ndesc = flits_to_desc(q->coalesce.flits + flits);
credits = txq_avail(q) - ndesc;
if (unlikely(credits < 0 || wraps_around(q, ndesc)))
return 0;
- q->coalesce.flits += 2;
+ q->coalesce.flits += wr_size / sizeof(__be64);
q->coalesce.type = type;
q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
- 2 * sizeof(__be64);
+ q->coalesce.flits * sizeof(__be64);
+ if (!is_pf4(adap))
+ memcpy((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
return 1;
}
@@ -987,6 +1003,8 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *sd;
unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+ unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
+ ETH_COALESCE_VF_PKT_NUM;
#ifdef RTE_LIBRTE_CXGBE_TPUT
RTE_SET_USED(nb_pkts);
@@ -1030,9 +1048,12 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
cpl->pack = htons(0);
cpl->len = htons(len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1061,7 +1082,7 @@ static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
sd->coalesce.idx = (idx & 1) + 1;
/* send the coaelsced work request if max reached */
- if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM
+ if (++q->coalesce.idx == max_coal_pkt_num
#ifndef RTE_LIBRTE_CXGBE_TPUT
|| q->coalesce.idx >= nb_pkts
#endif
@@ -1085,6 +1106,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
struct adapter *adap;
struct rte_mbuf *m = mbuf;
struct fw_eth_tx_pkt_wr *wr;
+ struct fw_eth_tx_pkt_vm_wr *vmwr;
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *d;
dma_addr_t addr[m->nb_segs];
@@ -1095,7 +1117,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
u32 wr_mid;
u64 cntrl, *end;
bool v6;
- u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
@@ -1115,7 +1137,7 @@ out_free:
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
- pi = (struct port_info *)txq->eth_dev->data->dev_private;
+ pi = (struct port_info *)txq->data->dev_private;
adap = pi->adapter;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
@@ -1141,7 +1163,7 @@ out_free:
if (txq->q.coalesce.idx)
ship_tx_pkt_coalesce_wr(adap, txq);
- flits = calc_tx_flits(m);
+ flits = calc_tx_flits(m, adap);
ndesc = flits_to_desc(flits);
credits = txq_avail(&txq->q) - ndesc;
@@ -1163,31 +1185,55 @@ out_free:
}
wr = (void *)&txq->q.desc[txq->q.pidx];
+ vmwr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
- wr->r3 = rte_cpu_to_be_64(0);
- end = (u64 *)wr + flits;
+ if (is_pf4(adap)) {
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+ } else {
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+
+ vmwr->r3[0] = rte_cpu_to_be_32(0);
+ vmwr->r3[1] = rte_cpu_to_be_32(0);
+ memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+ fw_hdr_copy_len);
+ end = (u64 *)vmwr + flits;
+ }
len = 0;
len += sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
V_FW_WR_IMMDLEN(len));
- cpl = (void *)(wr + 1);
+ if (is_pf4(adap))
+ cpl = (void *)(wr + 1);
+ else
+ cpl = (void *)(vmwr + 1);
if (m->ol_flags & PKT_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, m) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
}
} else {
- lso = (void *)(wr + 1);
+ if (is_pf4(adap))
+ lso = (void *)(wr + 1);
+ else
+ lso = (void *)(vmwr + 1);
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
len += sizeof(*lso);
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
V_FW_WR_IMMDLEN(len));
lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
@@ -1221,9 +1267,14 @@ out_free:
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
+ V_TXPKT_PF(0));
+
cpl->pack = htons(0);
cpl->len = htons(m->pkt_len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -1254,6 +1305,126 @@ out_free:
}
/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+ return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf: inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+ int len)
+{
+ int left = RTE_PTR_DIFF(q->stat, *to);
+
+ if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+ rte_memcpy(*to, from, len);
+ *to = RTE_PTR_ADD(*to, len);
+ } else {
+ rte_memcpy(*to, from, left);
+ from = RTE_PTR_ADD(from, left);
+ left = len - left;
+ rte_memcpy((void *)q->desc, from, left);
+ *to = RTE_PTR_ADD((void *)q->desc, left);
+ }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+ caddr_t dst;
+
+ if (unlikely(!is_imm(mbuf))) {
+ WARN_ON(1);
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+
+ reclaim_completed_tx_imm(&q->q);
+ ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+ t4_os_lock(&q->ctrlq_lock);
+
+ q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+ if (unlikely(q->full)) {
+ t4_os_unlock(&q->ctrlq_lock);
+ return -1;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ dst = (void *)wr;
+ inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+ &dst, mbuf->data_len);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < 64))
+ wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+ q->txp++;
+
+ ring_tx_db(q->adapter, &q->q);
+ t4_os_unlock(&q->ctrlq_lock);
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ return ctrl_xmit(q, mbuf);
+}
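Anything handed to t4_mgmt_tx() must fit entirely as immediate data (at most MAX_CTRL_WR_LEN bytes). A minimal caller sketch, assuming the usual sge.c headers are in scope; the helper name, mempool and work-request contents are hypothetical:

static int send_dummy_ctrl_wr(struct adapter *adap, struct rte_mempool *mp)
{
	struct rte_mbuf *mbuf = rte_pktmbuf_alloc(mp);
	struct fw_wr_hdr *wr;

	if (!mbuf)
		return -ENOMEM;

	/* Reserve room for the WR; append updates pkt_len/data_len. */
	wr = (struct fw_wr_hdr *)rte_pktmbuf_append(mbuf, sizeof(*wr));
	if (!wr) {
		rte_pktmbuf_free(mbuf);
		return -ENOMEM;
	}
	memset(wr, 0, sizeof(*wr));
	/* ... fill in a real firmware work request here ... */

	/* On success the contents are inlined and the mbuf is freed. */
	return t4_mgmt_tx(&adap->sge.ctrlq[0], mbuf);
}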
+
+/**
* alloc_ring - allocate resources for an SGE descriptor ring
* @dev: the PCI device's core device
* @nelem: the number of descriptors
@@ -1299,7 +1470,8 @@ static void *alloc_ring(size_t nelem, size_t elem_size,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, 4096);
if (!tz)
return NULL;
@@ -1468,6 +1640,7 @@ static int process_responses(struct sge_rspq *q, int budget,
rsp_type = G_RSPD_TYPE(rc->u.type_gen);
if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
+ struct sge *s = &q->adapter->sge;
unsigned int stat_pidx;
int stat_pidx_diff;
@@ -1550,10 +1723,12 @@ static int process_responses(struct sge_rspq *q, int budget,
}
if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN;
+ pkt->ol_flags |= PKT_RX_VLAN |
+ PKT_RX_VLAN_STRIPPED;
pkt->vlan_tci = ntohs(cpl->vlan);
}
+ rte_pktmbuf_adj(pkt, s->pktshift);
rxq->stats.pkts++;
rxq->stats.rx_bytes += pkt->pkt_len;
rx_pkts[budget - budget_left] = pkt;
@@ -1612,7 +1787,11 @@ int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_GTS;
+
+ t4_write_reg(q->adapter, reg,
val | V_INGRESSQID((u32)q->cntxt_id));
} else {
writel(val | V_INGRESSQID(q->bar2_qid),
@@ -1689,6 +1868,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
+ u8 pciechan;
/* Size needs to be multiple of 16, including status entry. */
iq->size = cxgbe_roundup(iq->size, 16);
@@ -1706,8 +1886,23 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+ V_FW_IQ_CMD_VFN(0));
+ if (cong >= 0)
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
(sizeof(c) / 16));
c.type_to_iqandstindex =
@@ -1719,16 +1914,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
- pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
- if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
- F_FW_IQ_CMD_IQRO);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
@@ -1768,7 +1959,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
0 : F_FW_IQ_CMD_FL0PACKEN) |
F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
F_FW_IQ_CMD_FL0PADEN);
- if (cong >= 0)
+ if (is_pf4(adap) && cong >= 0)
c.iqns_to_fl0congen |=
htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
F_FW_IQ_CMD_FL0CONGCIF |
@@ -1789,7 +1980,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret)
goto err;
@@ -1806,7 +2000,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->stat = (void *)&iq->desc[iq->size * 8];
iq->eth_dev = eth_dev;
iq->handler = hnd;
- iq->port_id = pi->port_id;
+ iq->port_id = pi->pidx;
iq->mb_pool = mp;
/* set offset to -1 to distinguish ingress queues without FL */
@@ -1846,7 +2040,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
* a lot easier to fix in one place ... For now we do something very
* simple (and hopefully less wrong).
*/
- if (!is_t4(adap->params.chip) && cong >= 0) {
+ if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
u32 param, val;
int i;
@@ -1893,9 +2087,11 @@ err:
return ret;
}
-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+ unsigned int abs_id)
{
q->cntxt_id = id;
+ q->abs_id = abs_id;
q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
&q->bar2_qid);
q->cidx = 0;
@@ -1943,6 +2139,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ u8 pciechan;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -1961,16 +2158,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(adap->pf) |
- V_FW_EQ_ETH_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
V_FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize =
htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
@@ -1978,7 +2181,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
V_FW_EQ_ETH_CMD_EQSIZE(nentries));
c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret) {
rte_free(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -1986,7 +2192,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
return ret;
}
- init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+ G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
txq->stats.tso = 0;
txq->stats.pkts = 0;
txq->stats.tx_cso = 0;
@@ -1997,10 +2204,69 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->stats.mapping_err = 0;
txq->flags |= EQ_STOPPED;
txq->eth_dev = eth_dev;
+ txq->data = eth_dev->data;
t4_os_lock_init(&txq->txq_lock);
return 0;
}
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+ eth_dev->device->driver->name, "ctrl_tx_ring",
+ eth_dev->data->port_id, queue_id);
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ 0, &txq->q.phys_addr,
+ NULL, 0, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+ V_FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+ F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+		 G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+ txq->adapter = adap;
+ txq->full = 0;
+ return 0;
+}
+
static void free_txq(struct sge_txq *q)
{
q->cntxt_id = 0;
@@ -2095,7 +2361,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
*/
void t4_free_sge_resources(struct adapter *adap)
{
- int i;
+ unsigned int i;
struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
@@ -2112,6 +2378,18 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ reclaim_completed_tx_imm(&cq->q);
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+ cq->q.cntxt_id);
+ free_txq(&cq->q);
+ }
+ }
+
if (adap->sge.fw_evtq.desc)
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
}
@@ -2280,3 +2558,182 @@ int t4_sge_init(struct adapter *adap)
return 0;
}
+
+int t4vf_sge_init(struct adapter *adap)
+{
+ struct sge_params *sge_params = &adap->params.sge;
+ u32 sge_ingress_queues_per_page;
+ u32 sge_egress_queues_per_page;
+ u32 sge_control, sge_control2;
+ u32 fl_small_pg, fl_large_pg;
+ u32 sge_ingress_rx_threshold;
+ u32 sge_timer_value_0_and_1;
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+ u32 sge_congestion_control;
+ struct sge *s = &adap->sge;
+ unsigned int s_hps, s_qpp;
+ u32 sge_host_page_size;
+ u32 params[7], vals[7];
+ int v;
+
+ /* query basic params from fw */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+ params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+ params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+ params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adap, 7, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+
+ sge_control = vals[0];
+ sge_host_page_size = vals[1];
+ fl_small_pg = vals[2];
+ fl_large_pg = vals[3];
+ sge_timer_value_0_and_1 = vals[4];
+ sge_timer_value_2_and_3 = vals[5];
+ sge_timer_value_4_and_5 = vals[6];
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver.
+ */
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+		dev_err(adap, "bad SGE FL buffer sizes [%d, %d]\n",
+			fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+
+ if ((sge_control & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+		dev_err(adap, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * Grab the ingress packing boundary from SGE_CONTROL2; it is needed
+	 * below to work out the Free List buffer alignment (fl_align).
+	 */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+ v = t4vf_query_params(adap, 1, params, vals);
+ if (v != FW_SUCCESS) {
+		dev_err(adap, "Unable to get SGE Control2; "
+			"probably old firmware.\n");
+ return v;
+ }
+ sge_control2 = vals[0];
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ sge_ingress_rx_threshold = vals[0];
+ sge_congestion_control = vals[1];
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_egress_queues_per_page = vals[0];
+ sge_ingress_queues_per_page = vals[1];
+
+ /*
+ * We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read.
+ */
+ s_hps = (S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+ sge_params->hps =
+ ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+ sge_params->eq_qpp =
+ ((sge_egress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ sge_params->iq_qpp =
+ ((sge_ingress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+
+ /*
+ * Now translate the queried parameters into our internal forms.
+ */
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+ ? 128 : 64);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ G_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
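As a worked example with a hypothetical readback: if G_EGRTHRESHOLDPACKING() yields 8, the SGE's Egress Congestion Threshold corresponds to 8 * 2 = 16 Free List pointers, so fl_starve_thres ends up as 2 * 8 + 1 = 17 buffers, keeping the refill threshold just above the point where the SGE itself would stall waiting for more Free List entries.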
+
+ /*
+ * Save RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(sge_timer_value_4_and_5));
+ s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+ return 0;
+}