author     Christian Ehrhardt <christian.ehrhardt@canonical.com>    2016-12-08 14:07:29 +0100
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>    2016-12-12 15:41:56 +0100
commit     3d9b72106bd664b1267533e7278ff817f942e3c6 (patch)
tree       52a6964f40e029d55d73bc18208971eff2297891 /drivers
parent     a83862b166241910d0474f801cb2e6afa4c6875b (diff)
Imported Upstream version 16.11
Change-Id: I1944c65ddc88a9ad70f8c0eb6731552b84fbcb77
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/crypto/Makefile | 2
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 33
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 22
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 10
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c | 22
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c | 22
-rw-r--r--  drivers/crypto/openssl/Makefile | 60
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c | 1062
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 708
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_private.h | 174
-rw-r--r--  drivers/crypto/openssl/rte_pmd_openssl_version.map | 3
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h | 15
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h | 22
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 692
-rw-r--r--  drivers/crypto/qat/qat_crypto.c | 524
-rw-r--r--  drivers/crypto/qat/qat_qp.c | 2
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c | 26
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c | 24
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 6
-rw-r--r--  drivers/crypto/zuc/Makefile | 69
-rw-r--r--  drivers/crypto/zuc/rte_pmd_zuc_version.map | 3
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c | 550
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_ops.c | 342
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_private.h | 108
-rw-r--r--  drivers/net/af_packet/rte_eth_af_packet.c | 20
-rw-r--r--  drivers/net/bnx2x/Makefile | 1
-rw-r--r--  drivers/net/bnx2x/bnx2x.c | 34
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 118
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c | 44
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.c | 16
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.h | 6
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.c | 349
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.h | 17
-rw-r--r--  drivers/net/bnx2x/debug.c | 96
-rw-r--r--  drivers/net/bnx2x/elink.c | 392
-rw-r--r--  drivers/net/bnx2x/elink.h | 4
-rw-r--r--  drivers/net/bnxt/Makefile | 1
-rw-r--r--  drivers/net/bnxt/bnxt.h | 10
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.c | 24
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.h | 1
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 187
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c | 3
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 138
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.h | 1
-rw-r--r--  drivers/net/bnxt/bnxt_irq.c | 156
-rw-r--r--  drivers/net/bnxt/bnxt_irq.h | 51
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c | 4
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h | 1
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c | 5
-rw-r--r--  drivers/net/bnxt/hsi_struct_def_dpdk.h | 8584
-rw-r--r--  drivers/net/bonding/rte_eth_bond_api.c | 70
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 74
-rw-r--r--  drivers/net/bonding/rte_eth_bond_private.h | 4
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c | 27
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c | 2
-rw-r--r--  drivers/net/cxgbe/sge.c | 7
-rw-r--r--  drivers/net/e1000/em_ethdev.c | 33
-rw-r--r--  drivers/net/e1000/em_rxtx.c | 1
-rw-r--r--  drivers/net/e1000/igb_ethdev.c | 116
-rw-r--r--  drivers/net/e1000/igb_rxtx.c | 5
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 105
-rw-r--r--  drivers/net/ena/ena_ethdev.h | 24
-rw-r--r--  drivers/net/enic/base/vnic_dev.c | 33
-rw-r--r--  drivers/net/enic/base/vnic_dev.h | 3
-rw-r--r--  drivers/net/enic/base/vnic_devcmd.h | 346
-rw-r--r--  drivers/net/enic/base/vnic_rq.h | 1
-rw-r--r--  drivers/net/enic/enic.h | 34
-rw-r--r--  drivers/net/enic/enic_clsf.c | 292
-rw-r--r--  drivers/net/enic/enic_ethdev.c | 79
-rw-r--r--  drivers/net/enic/enic_main.c | 222
-rw-r--r--  drivers/net/enic/enic_res.c | 5
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 60
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 32
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx.c | 4
-rw-r--r--  drivers/net/fm10k/fm10k_rxtx_vec.c | 26
-rw-r--r--  drivers/net/i40e/Makefile | 8
-rw-r--r--  drivers/net/i40e/base/i40e_adminq_cmd.h | 132
-rw-r--r--  drivers/net/i40e/base/i40e_common.c | 76
-rw-r--r--  drivers/net/i40e/base/i40e_devids.h | 1
-rw-r--r--  drivers/net/i40e/base/i40e_prototype.h | 3
-rw-r--r--  drivers/net/i40e/base/i40e_type.h | 295
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 340
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h | 45
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c | 46
-rw-r--r--  drivers/net/i40e/i40e_fdir.c | 46
-rw-r--r--  drivers/net/i40e/i40e_pf.c | 6
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 665
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h | 563
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h | 251
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c | 614
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c (renamed from drivers/net/i40e/i40e_rxtx_vec.c) | 257
-rw-r--r--  drivers/net/ixgbe/Makefile | 4
-rw-r--r--  drivers/net/ixgbe/base/README | 2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82598.c | 6
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_82599.c | 16
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.c | 11
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_api.h | 2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.c | 145
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.h | 2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_mbx.h | 7
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_osdep.h | 1
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.c | 100
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_phy.h | 71
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_type.h | 73
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.c | 90
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_vf.h | 3
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x540.c | 10
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.c | 1152
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_x550.h | 59
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 494
-rw-r--r--  drivers/net/ixgbe/ixgbe_pf.c | 42
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 40
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_common.h | 8
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c | 6
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c | 70
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe.h | 204
-rw-r--r--  drivers/net/ixgbe/rte_pmd_ixgbe_version.map | 13
-rw-r--r--  drivers/net/mlx4/mlx4.c | 53
-rw-r--r--  drivers/net/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/mlx5/mlx5.c | 29
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h | 2
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 8
-rw-r--r--  drivers/net/mlx5/mlx5_prm.h | 86
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 13
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 569
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 24
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 18
-rw-r--r--  drivers/net/mpipe/mpipe_tilegx.c | 22
-rw-r--r--  drivers/net/nfp/nfp_net.c | 65
-rw-r--r--  drivers/net/null/rte_eth_null.c | 20
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c | 654
-rw-r--r--  drivers/net/qede/Makefile | 6
-rw-r--r--  drivers/net/qede/base/bcm_osal.c | 25
-rw-r--r--  drivers/net/qede/base/bcm_osal.h | 10
-rw-r--r--  drivers/net/qede/base/common_hsi.h | 956
-rw-r--r--  drivers/net/qede/base/ecore.h | 631
-rw-r--r--  drivers/net/qede/base/ecore_chain.h | 51
-rw-r--r--  drivers/net/qede/base/ecore_cxt.c | 387
-rw-r--r--  drivers/net/qede/base/ecore_cxt.h | 52
-rw-r--r--  drivers/net/qede/base/ecore_cxt_api.h | 25
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c | 589
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.h | 18
-rw-r--r--  drivers/net/qede/base/ecore_dcbx_api.h | 154
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 1813
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h | 238
-rw-r--r--  drivers/net/qede/base/ecore_gtt_reg_addr.h | 30
-rw-r--r--  drivers/net/qede/base/ecore_gtt_values.h | 20
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h | 1358
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h | 1025
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h | 997
-rw-r--r--  drivers/net/qede/base/ecore_hsi_init_func.h | 132
-rw-r--r--  drivers/net/qede/base/ecore_hsi_init_tool.h | 454
-rw-r--r--  drivers/net/qede/base/ecore_hsi_tools.h | 1081
-rw-r--r--  drivers/net/qede/base/ecore_hw.c | 222
-rw-r--r--  drivers/net/qede/base/ecore_hw.h | 75
-rw-r--r--  drivers/net/qede/base/ecore_hw_defs.h | 39
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c | 400
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h | 250
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.c | 11
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.h | 14
-rw-r--r--  drivers/net/qede/base/ecore_int.c | 446
-rw-r--r--  drivers/net/qede/base/ecore_int.h | 23
-rw-r--r--  drivers/net/qede/base/ecore_int_api.h | 11
-rw-r--r--  drivers/net/qede/base/ecore_iov_api.h | 519
-rw-r--r--  drivers/net/qede/base/ecore_iro.h | 234
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h | 140
-rw-r--r--  drivers/net/qede/base/ecore_l2.c | 531
-rw-r--r--  drivers/net/qede/base/ecore_l2.h | 85
-rw-r--r--  drivers/net/qede/base/ecore_l2_api.h | 167
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c | 881
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h | 141
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h | 220
-rw-r--r--  drivers/net/qede/base/ecore_proto_if.h | 63
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h | 869
-rw-r--r--  drivers/net/qede/base/ecore_sp_api.h | 15
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c | 99
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.h | 38
-rw-r--r--  drivers/net/qede/base/ecore_spq.c | 237
-rw-r--r--  drivers/net/qede/base/ecore_spq.h | 162
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c | 1818
-rw-r--r--  drivers/net/qede/base/ecore_sriov.h | 247
-rw-r--r--  drivers/net/qede/base/ecore_status.h | 18
-rw-r--r--  drivers/net/qede/base/ecore_vf.c | 759
-rw-r--r--  drivers/net/qede/base/ecore_vf.h | 258
-rw-r--r--  drivers/net/qede/base/ecore_vf_api.h | 100
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h | 436
-rw-r--r--  drivers/net/qede/base/eth_common.h | 439
-rw-r--r--  drivers/net/qede/base/mcp_public.h | 825
-rw-r--r--  drivers/net/qede/base/nvm_cfg.h | 2183
-rw-r--r--  drivers/net/qede/base/reg_addr.h | 36
-rw-r--r--  drivers/net/qede/qede_eth_if.c | 75
-rw-r--r--  drivers/net/qede/qede_eth_if.h | 16
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 548
-rw-r--r--  drivers/net/qede/qede_ethdev.h | 84
-rw-r--r--  drivers/net/qede/qede_if.h | 13
-rw-r--r--  drivers/net/qede/qede_main.c | 87
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 763
-rw-r--r--  drivers/net/qede/qede_rxtx.h | 25
-rw-r--r--  drivers/net/ring/rte_eth_ring.c | 50
-rw-r--r--  drivers/net/szedata2/rte_eth_szedata2.c | 33
-rw-r--r--  drivers/net/thunderx/Makefile | 2
-rw-r--r--  drivers/net/thunderx/base/nicvf_bsvf.c | 72
-rw-r--r--  drivers/net/thunderx/base/nicvf_bsvf.h | 76
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.c | 37
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw.h | 29
-rw-r--r--  drivers/net/thunderx/base/nicvf_hw_defs.h | 3
-rw-r--r--  drivers/net/thunderx/base/nicvf_mbox.c | 47
-rw-r--r--  drivers/net/thunderx/base/nicvf_mbox.h | 21
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.c | 858
-rw-r--r--  drivers/net/thunderx/nicvf_ethdev.h | 41
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c | 3
-rw-r--r--  drivers/net/thunderx/nicvf_struct.h | 6
-rw-r--r--  drivers/net/thunderx/nicvf_svf.c | 78
-rw-r--r--  drivers/net/thunderx/nicvf_svf.h | 66
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c | 380
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.h | 9
-rw-r--r--  drivers/net/vhost/rte_pmd_vhost_version.map | 6
-rw-r--r--  drivers/net/virtio/Makefile | 7
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 477
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h | 38
-rw-r--r--  drivers/net/virtio/virtio_pci.c | 5
-rw-r--r--  drivers/net/virtio/virtio_pci.h | 15
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c | 564
-rw-r--r--  drivers/net/virtio/virtio_rxtx.h | 3
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.c | 272
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple.h | 136
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_neon.c | 235
-rw-r--r--  drivers/net/virtio/virtio_rxtx_simple_sse.c | 222
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 20
-rw-r--r--  drivers/net/virtio/virtqueue.h | 9
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 149
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.h | 32
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ring.h | 22
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 60
-rw-r--r--  drivers/net/xenvirt/rte_eth_xenvirt.c | 20
-rw-r--r--  drivers/net/xenvirt/rte_eth_xenvirt.h | 1
236 files changed, 34120 insertions, 18571 deletions
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index dc4ef7f9..745c6146 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,9 +33,11 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
include $(RTE_SDK)/mk/rte.subdir.mk
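Both new sub-directories are gated by build-time switches that default to off, since each PMD links against an external crypto library (libcrypto for the OpenSSL PMD, the ZUC software library for the ZUC PMD). A minimal sketch of enabling them, assuming the usual config/common_base layout of this release:

CONFIG_RTE_LIBRTE_PMD_OPENSSL=y
CONFIG_RTE_LIBRTE_PMD_ZUC=y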
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index dc0b0337..dba5e158 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -37,7 +37,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -230,11 +230,20 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
op->cipher.data.offset);
/* sanity checks */
- if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+ if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
+ op->cipher.iv.length != 0) {
GCM_LOG_ERR("iv");
return -1;
}
+ /*
+ * GCM working in 12B IV mode => 16B pre-counter block we need
+ * to set BE LSB to 1, driver expects that 16B is allocated
+ */
+ if (op->cipher.iv.length == 12) {
+ op->cipher.iv.data[15] = 1;
+ }
+
if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
op->auth.aad.length != 0) {
GCM_LOG_ERR("iv");
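The newly accepted 12-byte IV relies on the application having allocated a full 16-byte IV buffer: for a 96-bit IV, GCM forms the initial counter block J0 as the IV followed by 0x00000001, so the driver only needs to force the big-endian LSB to 1, exactly as the hunk above does. A standalone editorial sketch of that layout (hypothetical helper, not part of the patch):

#include <stdint.h>
#include <string.h>

/*
 * Build the 16-byte GCM pre-counter block J0 from a 96-bit IV:
 * J0 = IV || 0x00000001 (the 12-byte-IV case of the GCM spec).
 * The patch does the equivalent in place on op->cipher.iv.data,
 * assuming the application allocated 16 bytes for it.
 */
static void
gcm_j0_from_96bit_iv(const uint8_t iv[12], uint8_t j0[16])
{
	memcpy(j0, iv, 12);
	j0[12] = 0;
	j0[13] = 0;
	j0[14] = 0;
	j0[15] = 1; /* big-endian LSB set to 1 */
}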
@@ -395,7 +404,7 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
-static int aesni_gcm_uninit(const char *name);
+static int aesni_gcm_remove(const char *name);
static int
aesni_gcm_create(const char *name,
@@ -477,12 +486,12 @@ aesni_gcm_create(const char *name,
init_error:
GCM_LOG_ERR("driver %s: create failed", name);
- aesni_gcm_uninit(crypto_dev_name);
+ aesni_gcm_remove(crypto_dev_name);
return -EFAULT;
}
static int
-aesni_gcm_init(const char *name, const char *input_args)
+aesni_gcm_probe(const char *name, const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
@@ -503,7 +512,7 @@ aesni_gcm_init(const char *name, const char *input_args)
}
static int
-aesni_gcm_uninit(const char *name)
+aesni_gcm_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -514,14 +523,14 @@ aesni_gcm_uninit(const char *name)
return 0;
}
-static struct rte_driver aesni_gcm_pmd_drv = {
- .type = PMD_VDEV,
- .init = aesni_gcm_init,
- .uninit = aesni_gcm_uninit
+static struct rte_vdev_driver aesni_gcm_pmd_drv = {
+ .probe = aesni_gcm_probe,
+ .remove = aesni_gcm_remove
};
-PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv, CRYPTODEV_NAME_AESNI_GCM_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index b2d0c8ca..f07cd077 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -34,7 +34,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -595,7 +595,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
-static int cryptodev_aesni_mb_uninit(const char *name);
+static int cryptodev_aesni_mb_remove(const char *name);
static int
cryptodev_aesni_mb_create(const char *name,
@@ -675,13 +675,13 @@ cryptodev_aesni_mb_create(const char *name,
init_error:
MB_LOG_ERR("driver %s: cryptodev_aesni_create failed", name);
- cryptodev_aesni_mb_uninit(crypto_dev_name);
+ cryptodev_aesni_mb_remove(crypto_dev_name);
return -EFAULT;
}
static int
-cryptodev_aesni_mb_init(const char *name,
+cryptodev_aesni_mb_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -703,7 +703,7 @@ cryptodev_aesni_mb_init(const char *name,
}
static int
-cryptodev_aesni_mb_uninit(const char *name)
+cryptodev_aesni_mb_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -714,14 +714,14 @@ cryptodev_aesni_mb_uninit(const char *name)
return 0;
}
-static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_aesni_mb_init,
- .uninit = cryptodev_aesni_mb_uninit
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+ .probe = cryptodev_aesni_mb_probe,
+ .remove = cryptodev_aesni_mb_remove
};
-PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv, CRYPTODEV_NAME_AESNI_MB_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d3c46ace..3d49e2ae 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -311,8 +311,14 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
- if (dev->data->queue_pairs[qp_id] != NULL) {
- rte_free(dev->data->queue_pairs[qp_id]);
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_ring *r = NULL;
+
+ if (qp != NULL) {
+ r = rte_ring_lookup(qp->name);
+ if (r)
+ rte_ring_free(r);
+ rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
}
return 0;
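The two conversions above (aesni_gcm and aesni_mb) follow the pattern repeated below for the kasumi, null and openssl PMDs: the rte_driver .init/.uninit callbacks become .probe/.remove on an rte_vdev_driver, and PMD_REGISTER_DRIVER is replaced by RTE_PMD_REGISTER_VDEV plus an alias preserving the old driver name. A condensed editorial sketch of the new shape for a hypothetical vdev PMD (all names illustrative, not part of the patch):

#include <rte_vdev.h>

/* Hypothetical virtual device PMD, showing the 16.11 registration shape. */
static int
my_pmd_probe(const char *name, const char *params)
{
	(void)name;
	(void)params;
	return 0; /* allocate and configure the device here */
}

static int
my_pmd_remove(const char *name)
{
	(void)name;
	return 0; /* release the device here */
}

static struct rte_vdev_driver my_pmd_drv = {
	.probe = my_pmd_probe,
	.remove = my_pmd_remove
};

RTE_PMD_REGISTER_VDEV(my_pmd, my_pmd_drv);
RTE_PMD_REGISTER_ALIAS(my_pmd, my_pmd_legacy_name);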
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index df1eb529..b119da28 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -35,7 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -556,7 +556,7 @@ kasumi_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
-static int cryptodev_kasumi_uninit(const char *name);
+static int cryptodev_kasumi_remove(const char *name);
static int
cryptodev_kasumi_create(const char *name,
@@ -611,12 +611,12 @@ cryptodev_kasumi_create(const char *name,
init_error:
KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);
- cryptodev_kasumi_uninit(crypto_dev_name);
+ cryptodev_kasumi_remove(crypto_dev_name);
return -EFAULT;
}
static int
-cryptodev_kasumi_init(const char *name,
+cryptodev_kasumi_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -638,7 +638,7 @@ cryptodev_kasumi_init(const char *name,
}
static int
-cryptodev_kasumi_uninit(const char *name)
+cryptodev_kasumi_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -650,14 +650,14 @@ cryptodev_kasumi_uninit(const char *name)
return 0;
}
-static struct rte_driver cryptodev_kasumi_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_kasumi_init,
- .uninit = cryptodev_kasumi_uninit
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+ .probe = cryptodev_kasumi_probe,
+ .remove = cryptodev_kasumi_remove
};
-PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv, CRYPTODEV_NAME_KASUMI_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 909b04f9..c69606b3 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -33,7 +33,7 @@
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include "null_crypto_pmd_private.h"
@@ -182,7 +182,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
-static int cryptodev_null_uninit(const char *name);
+static int cryptodev_null_remove(const char *name);
/** Create crypto device */
static int
@@ -227,14 +227,14 @@ cryptodev_null_create(const char *name,
init_error:
NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed", name);
- cryptodev_null_uninit(crypto_dev_name);
+ cryptodev_null_remove(crypto_dev_name);
return -EFAULT;
}
/** Initialise null crypto device */
static int
-cryptodev_null_init(const char *name,
+cryptodev_null_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -257,7 +257,7 @@ cryptodev_null_init(const char *name,
/** Uninitialise null crypto device */
static int
-cryptodev_null_uninit(const char *name)
+cryptodev_null_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -268,14 +268,14 @@ cryptodev_null_uninit(const char *name)
return 0;
}
-static struct rte_driver cryptodev_null_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_null_init,
- .uninit = cryptodev_null_uninit
+static struct rte_vdev_driver cryptodev_null_pmd_drv = {
+ .probe = cryptodev_null_probe,
+ .remove = cryptodev_null_remove
};
-PMD_REGISTER_DRIVER(cryptodev_null_pmd_drv, CRYPTODEV_NAME_NULL_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/openssl/Makefile b/drivers/crypto/openssl/Makefile
new file mode 100644
index 00000000..8c4250c8
--- /dev/null
+++ b/drivers/crypto/openssl/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_openssl.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_openssl_version.map
+
+# external library dependencies
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
new file mode 100644
index 00000000..5f8fa331
--- /dev/null
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -0,0 +1,1062 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include <openssl/evp.h>
+
+#include "rte_openssl_pmd_private.h"
+
+static int cryptodev_openssl_remove(const char *name);
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * Global static parameter used to create a unique name for each
+ * OPENSSL crypto device.
+ */
+static unsigned int unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u",
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Increment counter by 1
+ * Counter is 64 bit array, big-endian
+ */
+static void
+ctr_inc(uint8_t *ctr)
+{
+ uint64_t *ctr64 = (uint64_t *)ctr;
+
+ *ctr64 = __builtin_bswap64(*ctr64);
+ (*ctr64)++;
+ *ctr64 = __builtin_bswap64(*ctr64);
+}
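An editorial restatement of what ctr_inc() above is meant to do (not part of the patch): the 8 counter bytes are treated as one big-endian integer, so an increment carries from the last byte upward. A byte-wise version makes that explicit and is endian-independent:

#include <assert.h>
#include <stdint.h>

/* Increment an 8-byte big-endian counter, propagating the carry. */
static void
ctr_inc_be(uint8_t ctr[8])
{
	int i;

	for (i = 7; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}

int
main(void)
{
	uint8_t ctr[8] = { 0, 0, 0, 0, 0, 0, 0, 0xff };

	ctr_inc_be(ctr);
	assert(ctr[6] == 0x01 && ctr[7] == 0x00); /* 0xff + 1 carries */
	return 0;
}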
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum openssl_chain_order
+openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum openssl_chain_order res = OPENSSL_CHAIN_NOT_SUPPORTED;
+
+ if (xform != NULL) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_AUTH;
+ else if (xform->next->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ res = OPENSSL_CHAIN_AUTH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ res = OPENSSL_CHAIN_CIPHER_AUTH;
+ }
+ }
+
+ return res;
+}
+
+/** Get session cipher key from input cipher key */
+static void
+get_cipher_key(uint8_t *input_key, int keylen, uint8_t *session_key)
+{
+ memcpy(session_key, input_key, keylen);
+}
+
+/** Get key ede 24 bytes standard from input key */
+static int
+get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
+{
+ int res = 0;
+
+ /* Initialize keys - 24 bytes: [key1-key2-key3] */
+ switch (keylen) {
+ case 24:
+ memcpy(key_ede, key, 24);
+ break;
+ case 16:
+ /* K3 = K1 */
+ memcpy(key_ede, key, 16);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ case 8:
+ /* K1 = K2 = K3 (DES compatibility) */
+ memcpy(key_ede, key, 8);
+ memcpy(key_ede + 8, key, 8);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ default:
+ OPENSSL_LOG_ERR("Unsupported key size");
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input cipher algorithm */
+static uint8_t
+get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_des_ede_cbc();
+ break;
+ case 24:
+ *algo = EVP_des_ede3_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_cbc();
+ break;
+ case 24:
+ *algo = EVP_aes_192_cbc();
+ break;
+ case 32:
+ *algo = EVP_aes_256_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ctr();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ctr();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ctr();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input auth algorithm */
+static uint8_t
+get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sessalgo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Set session cipher parameters */
+static int
+openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->cipher.key.length;
+
+ /* Select cipher algo */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_algo(sess->cipher.algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_DES3CTR;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_key_ede(xform->cipher.key.data,
+ sess->cipher.key.length,
+ sess->cipher.key.data) != 0)
+ return -EINVAL;
+ break;
+
+ default:
+ sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set session auth parameters */
+static int
+openssl_set_session_auth_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ /* Select auth algo */
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ /* Check additional condition for AES_GMAC/GCM */
+ if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
+ return -EINVAL;
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.mode = OPENSSL_AUTH_AS_AUTH;
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.auth.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.auth.ctx = EVP_MD_CTX_create();
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
+ sess->auth.hmac.ctx = EVP_MD_CTX_create();
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.hmac.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.hmac.pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
+ xform->auth.key.data, xform->auth.key.length);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+
+ sess->chain_order = openssl_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* cipher_xform must be check before auth_xform */
+ if (cipher_xform) {
+ if (openssl_set_session_cipher_parameters(
+ sess, cipher_xform)) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+ }
+
+ if (auth_xform) {
+ if (openssl_set_session_auth_parameters(sess, auth_xform)) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
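openssl_set_session_parameters() above consumes the xform list an application attaches to its session. A minimal editorial sketch (not part of the patch) of the cipher-then-auth chain that openssl_get_chain_order() classifies as OPENSSL_CHAIN_CIPHER_AUTH, with illustrative algorithms and key lengths only:

#include <string.h>
#include <rte_cryptodev.h>

/*
 * Build an AES-128-CBC encrypt xform chained to a SHA1-HMAC generate
 * xform. The key buffers must stay valid for the session's lifetime;
 * the sizes used here are examples, not requirements of the PMD.
 */
static void
build_cipher_auth_chain(struct rte_crypto_sym_xform *cipher,
		struct rte_crypto_sym_xform *auth,
		uint8_t *cipher_key, uint8_t *hmac_key)
{
	memset(cipher, 0, sizeof(*cipher));
	memset(auth, 0, sizeof(*auth));

	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
	cipher->cipher.key.data = cipher_key;
	cipher->cipher.key.length = 16;
	cipher->next = auth;		/* cipher first, then auth */

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
	auth->auth.key.data = hmac_key;
	auth->auth.key.length = 64;
	auth->next = NULL;
}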
+
+/** Reset private session parameters */
+void
+openssl_reset_session(struct openssl_session *sess)
+{
+ EVP_CIPHER_CTX_free(sess->cipher.ctx);
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ EVP_MD_CTX_destroy(sess->auth.auth.ctx);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ EVP_PKEY_free(sess->auth.hmac.pkey);
+ EVP_MD_CTX_destroy(sess->auth.hmac.ctx);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Provide session for operation */
+static struct openssl_session *
+get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
+{
+ struct openssl_session *sess = NULL;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL &&
+ op->sym->session->dev_type ==
+ RTE_CRYPTODEV_OPENSSL_PMD))
+ sess = (struct openssl_session *)
+ op->sym->session->_private;
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+
+ if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+ sess = (struct openssl_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)
+ ->_private;
+
+ if (unlikely(openssl_set_session_parameters(
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ } else
+ op->sym->session = _sess;
+ }
+ }
+
+ if (sess == NULL)
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int dstlen, totlen;
+
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl cipher decryption */
+static int
+process_openssl_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int dstlen, totlen;
+
+ if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ goto process_cipher_decrypt_err;
+
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto process_cipher_decrypt_err;
+
+ return 0;
+
+process_cipher_decrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher decrypt failed");
+ return -EINVAL;
+}
+
+/** Process cipher des 3 ctr encryption, decryption algorithm */
+static int
+process_openssl_cipher_des3ctr(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, uint8_t *key, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ uint8_t ebuf[8], ctr[8];
+ int unused, n;
+
+ /* We use 3DES encryption also for decryption.
+ * IV is not important for 3DES ecb
+ */
+ if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
+ goto process_cipher_des3ctr_err;
+
+ memcpy(ctr, iv, 8);
+ n = 0;
+
+ while (n < srclen) {
+ if (n % 8 == 0) {
+ if (EVP_EncryptUpdate(ctx,
+ (unsigned char *)&ebuf, &unused,
+ (const unsigned char *)&ctr, 8) <= 0)
+ goto process_cipher_des3ctr_err;
+ ctr_inc(ctr);
+ }
+ dst[n] = src[n] ^ ebuf[n % 8];
+ n++;
+ }
+
+ return 0;
+
+process_cipher_des3ctr_err:
+ OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed");
+ return -EINVAL;
+}
+
+/** Process auth/encription aes-gcm algorithm */
+static int
+process_openssl_auth_encryption_gcm(uint8_t *src, int srclen,
+ uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (aadlen > 0) {
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ /* Workaround open ssl bug in version less then 1.0.1f */
+ if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_encryption_gcm_err;
+ }
+
+ if (srclen > 0)
+ if (EVP_EncryptUpdate(ctx, dst, &len, src, srclen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst + len, &len) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ return 0;
+
+process_auth_encryption_gcm_err:
+ OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed");
+ return -EINVAL;
+}
+
+static int
+process_openssl_auth_decryption_gcm(uint8_t *src, int srclen,
+ uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_DecryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (aadlen > 0) {
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ /* Workaround open ssl bug in version less then 1.0.1f */
+ if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_decryption_gcm_err;
+ }
+
+ if (srclen > 0)
+ if (EVP_DecryptUpdate(ctx, dst, &len, src, srclen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst + len, &len) <= 0)
+ goto process_auth_decryption_gcm_final_err;
+
+ return 0;
+
+process_auth_decryption_gcm_err:
+ OPENSSL_LOG_ERR("Process openssl auth description gcm failed");
+ return -EINVAL;
+
+process_auth_decryption_gcm_final_err:
+ return -EFAULT;
+}
+
+/** Process standard openssl auth algorithms */
+static int
+process_openssl_auth(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+
+ if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestFinal_ex(ctx, dst, (unsigned int *)&dstlen) <= 0)
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG_ERR("Process openssl auth failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms with hmac */
+static int
+process_openssl_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv, EVP_PKEY *pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG_ERR("Process openssl auth failed");
+ return -EINVAL;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/** Process auth/cipher combined operation */
+static void
+process_openssl_combined_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ /* cipher */
+ uint8_t *src = NULL, *dst = NULL, *iv, *tag, *aad;
+ int srclen, ivlen, aadlen, status = -1;
+
+ iv = op->sym->cipher.iv.data;
+ ivlen = op->sym->cipher.iv.length;
+ aad = op->sym->auth.aad.data;
+ aadlen = op->sym->auth.aad.length;
+
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset +
+ op->sym->cipher.data.length);
+
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ srclen = 0;
+ else {
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+ }
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_auth_encryption_gcm(
+ src, srclen, aad, aadlen, iv, ivlen,
+ sess->cipher.key.data, dst, tag,
+ sess->cipher.ctx, sess->cipher.evp_algo);
+ else
+ status = process_openssl_auth_decryption_gcm(
+ src, srclen, aad, aadlen, iv, ivlen,
+ sess->cipher.key.data, dst, tag,
+ sess->cipher.ctx, sess->cipher.evp_algo);
+
+ if (status != 0) {
+ if (status == (-EFAULT) &&
+ sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+}
+
+/** Process cipher operation */
+static void
+process_openssl_cipher_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst, *iv;
+ int srclen, status;
+
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = op->sym->cipher.iv.data;
+
+ if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_cipher_encrypt(src, dst, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_cipher_decrypt(src, dst, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_cipher_des3ctr(src, dst, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx);
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process auth operation */
+static void
+process_openssl_auth_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+ dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+ op->sym->auth.digest.length);
+ else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL)
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ status = process_openssl_auth(src, dst,
+ NULL, NULL, srclen,
+ sess->auth.auth.ctx, sess->auth.auth.evp_algo);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ status = process_openssl_auth_hmac(src, dst,
+ NULL, sess->auth.hmac.pkey, srclen,
+ sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
+ break;
+ default:
+ status = -1;
+ break;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(mbuf_src,
+ op->sym->auth.digest.length);
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+ int retval;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ process_openssl_auth_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(op, sess, mdst, mdst);
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ process_openssl_combined_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ else
+ retval = -1;
+
+ return retval;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_session *sess;
+ struct openssl_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ qp->stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/** Create OPENSSL crypto device */
+static int
+cryptodev_openssl_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct openssl_private *internals;
+
+ /* create a unique device name */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ OPENSSL_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct openssl_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ OPENSSL_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_OPENSSL_PMD;
+ dev->dev_ops = rte_openssl_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = openssl_pmd_dequeue_burst;
+ dev->enqueue_burst = openssl_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed", name);
+
+ cryptodev_openssl_remove(crypto_dev_name);
+ return -EFAULT;
+}
+
+/** Initialise OPENSSL crypto device */
+static int
+cryptodev_openssl_probe(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_openssl_create(name, &init_params);
+}
+
+/** Uninitialise OPENSSL crypto device */
+static int
+cryptodev_openssl_remove(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing OPENSSL crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
+ .probe = cryptodev_openssl_probe,
+ .remove = cryptodev_openssl_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
+ cryptodev_openssl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
new file mode 100644
index 00000000..875550c7
--- /dev/null
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -0,0 +1,708 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_openssl_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+openssl_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+openssl_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+openssl_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+openssl_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+openssl_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+openssl_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+openssl_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct openssl_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = openssl_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+openssl_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and queue pair id */
+static int
+openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct openssl_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "openssl_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ OPENSSL_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ OPENSSL_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct openssl_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ openssl_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("OPENSSL PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (openssl_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = openssl_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+openssl_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (openssl_set_session_parameters(
+ sess, xform) != 0) {
+ OPENSSL_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+
+/** Clear the session memory so it doesn't leave key material behind */
+static void
+openssl_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+	 * Currently the whole data structure is reset; it should be investigated
+	 * whether a more selective reset of only the key material would be more
+	 * performant.
+ */
+ if (sess) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ }
+}
+
+struct rte_cryptodev_ops openssl_pmd_ops = {
+ .dev_configure = openssl_pmd_config,
+ .dev_start = openssl_pmd_start,
+ .dev_stop = openssl_pmd_stop,
+ .dev_close = openssl_pmd_close,
+
+ .stats_get = openssl_pmd_stats_get,
+ .stats_reset = openssl_pmd_stats_reset,
+
+ .dev_infos_get = openssl_pmd_info_get,
+
+ .queue_pair_setup = openssl_pmd_qp_setup,
+ .queue_pair_release = openssl_pmd_qp_release,
+ .queue_pair_start = openssl_pmd_qp_start,
+ .queue_pair_stop = openssl_pmd_qp_stop,
+ .queue_pair_count = openssl_pmd_qp_count,
+
+ .session_get_size = openssl_pmd_session_get_size,
+ .session_configure = openssl_pmd_session_configure,
+ .session_clear = openssl_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
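The capability table and the ops struct registered above are what an application ultimately sees through the cryptodev info query. As a rough, hypothetical sketch that is not part of this patch, a caller could walk such a table to check whether the PMD accepts a given AES-CBC key length; the helper name is invented here and the code assumes only the rte_cryptodev capability layout used in the array above (include <rte_cryptodev.h>):

static int
pmd_supports_aes_cbc_key(const struct rte_cryptodev_capabilities *caps,
		uint16_t key_len)
{
	const struct rte_cryptodev_capabilities *c;

	/*
	 * The list is terminated by RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST(),
	 * i.e. an entry whose op is RTE_CRYPTO_OP_TYPE_UNDEFINED.
	 */
	for (c = caps; c->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; c++) {
		if (c->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC ||
		    c->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER ||
		    c->sym.cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC)
			continue;
		/* Found the AES-CBC entry: check the advertised key range. */
		return key_len >= c->sym.cipher.key_size.min &&
		       key_len <= c->sym.cipher.key_size.max;
	}
	return 0;
}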
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
new file mode 100644
index 00000000..65c5f979
--- /dev/null
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -0,0 +1,174 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OPENSSL_PMD_PRIVATE_H_
+#define _OPENSSL_PMD_PRIVATE_H_
+
+#include <openssl/evp.h>
+#include <openssl/des.h>
+
+
+#define OPENSSL_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_OPENSSL_DEBUG
+#define OPENSSL_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define OPENSSL_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define OPENSSL_LOG_INFO(fmt, args...)
+#define OPENSSL_LOG_DBG(fmt, args...)
+#endif
+
+
+/** OPENSSL operation order mode enumerator */
+enum openssl_chain_order {
+ OPENSSL_CHAIN_ONLY_CIPHER,
+ OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_AUTH,
+ OPENSSL_CHAIN_AUTH_CIPHER,
+ OPENSSL_CHAIN_COMBINED,
+ OPENSSL_CHAIN_NOT_SUPPORTED
+};
+
+/** OPENSSL cipher mode enumerator */
+enum openssl_cipher_mode {
+ OPENSSL_CIPHER_LIB,
+ OPENSSL_CIPHER_DES3CTR,
+};
+
+/** OPENSSL auth mode enumerator */
+enum openssl_auth_mode {
+ OPENSSL_AUTH_AS_AUTH,
+ OPENSSL_AUTH_AS_HMAC,
+};
+
+/** private data structure for each OPENSSL crypto device */
+struct openssl_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions */
+};
+
+/** OPENSSL crypto queue pair */
+struct openssl_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed operations */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private session structure */
+struct openssl_session {
+ enum openssl_chain_order chain_order;
+ /**< chain order mode */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum openssl_cipher_mode mode;
+ /**< cipher operation mode */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+
+ struct {
+ uint8_t data[32];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ const EVP_CIPHER *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_CIPHER_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum openssl_auth_mode mode;
+ /**< auth operation mode */
+ enum rte_crypto_auth_algorithm algo;
+ /**< auth algorithm */
+
+ union {
+ struct {
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } auth;
+
+ struct {
+ EVP_PKEY *pkey;
+ /**< pointer to EVP key */
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } hmac;
+ };
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate OPENSSL crypto session parameters */
+extern int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Reset OPENSSL crypto session parameters */
+extern void
+openssl_reset_session(struct openssl_session *sess);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
+
+#endif /* _OPENSSL_PMD_PRIVATE_H_ */
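The chain_order field declared above drives how the data-path code in rte_openssl_pmd.c dispatches each operation. Below is a minimal sketch, not the PMD's actual parsing code, of how a one- or two-element xform chain could be mapped onto this enum; it omits the combined (GCM/GMAC) case and uses only the standard rte_crypto_sym_xform fields:

static enum openssl_chain_order
sketch_get_chain_order(const struct rte_crypto_sym_xform *xform)
{
	if (xform == NULL)
		return OPENSSL_CHAIN_NOT_SUPPORTED;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next == NULL)
			return OPENSSL_CHAIN_ONLY_CIPHER;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return OPENSSL_CHAIN_CIPHER_AUTH;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->next == NULL)
			return OPENSSL_CHAIN_ONLY_AUTH;
		if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
			return OPENSSL_CHAIN_AUTH_CIPHER;
	}

	return OPENSSL_CHAIN_NOT_SUPPORTED;
}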
diff --git a/drivers/crypto/openssl/rte_pmd_openssl_version.map b/drivers/crypto/openssl/rte_pmd_openssl_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/drivers/crypto/openssl/rte_pmd_openssl_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index 4d4d8e4d..ebe245f6 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -237,6 +237,11 @@ enum icp_qat_hw_cipher_dir {
ICP_QAT_HW_CIPHER_DECRYPT = 1,
};
+enum icp_qat_hw_auth_op {
+ ICP_QAT_HW_AUTH_VERIFY = 0,
+ ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
enum icp_qat_hw_cipher_convert {
ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
@@ -293,14 +298,12 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
-struct icp_qat_hw_cipher_aes256_f8 {
- struct icp_qat_hw_cipher_config cipher_config;
- uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
-};
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
struct icp_qat_hw_cipher_algo_blk {
- struct icp_qat_hw_cipher_aes256_f8 aes;
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned;
+
#endif
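The rework above replaces the fixed AES-256-F8-sized cipher block with a config word followed by a key buffer of up to ICP_QAT_HW_CIPHER_MAX_KEY_SZ bytes, so the descriptor-build code can account for only the bytes actually used. A hedged one-line illustration of the size computation this enables (AES-128 taken as an example, not from the patch itself):

/* Space an AES-128 cipher block now occupies in the content descriptor:
 * the config word plus the real key length, rather than always the full
 * sizeof(struct icp_qat_hw_cipher_algo_blk). */
size_t used = sizeof(struct icp_qat_hw_cipher_config) + 16;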
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 243c1b40..dcc0df59 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -51,6 +51,18 @@
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+/*
+ * Key Modifier (KM) value XOR-ed with the Integrity Key (IK) in the
+ * KASUMI F9 mode
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
ICP_QAT_HW_CIPHER_NO_CONVERT, \
@@ -86,11 +98,13 @@ struct qat_session {
enum icp_qat_hw_cipher_dir qat_dir;
enum icp_qat_hw_cipher_mode qat_mode;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
struct qat_alg_cd cd;
+ uint8_t *cd_cur_ptr;
phys_addr_t cd_paddr;
struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
struct qat_crypto_instance *inst;
- uint8_t salt[ICP_QAT_HW_AES_BLK_SZ];
rte_spinlock_t lock; /* protects this struct */
};
@@ -115,7 +129,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t digestsize,
unsigned int operation);
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header);
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ uint16_t proto);
void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
int alg, const uint8_t *key,
@@ -127,5 +142,6 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
#endif
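The key-modifier constants added above are applied to the key at 32-bit granularity when KASUMI content descriptors are built later in this patch. The fragment below is a standalone sketch of that operation; the variable names (kasumi_key, ik, ik_xor_km) are illustrative and not taken from the driver, and it assumes <string.h> and <rte_common.h> for memcpy and RTE_DIM:

/* Derive the modified key IK^KM from a caller-supplied 16-byte KASUMI key. */
uint32_t ik[4], ik_xor_km[4];
unsigned int i;

memcpy(ik, kasumi_key, sizeof(ik));
for (i = 0; i < RTE_DIM(ik); i++)
	ik_xor_km[i] = ik[i] ^ KASUMI_F9_KEY_MODIFIER_4_BYTES;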
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 185bb334..8900668d 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -58,6 +58,7 @@
#include <openssl/sha.h> /* Needed to calculate pre-compute values */
#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
/*
@@ -70,9 +71,15 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
@@ -86,6 +93,12 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -103,10 +116,16 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
switch (qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return ICP_QAT_HW_SHA224_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return ICP_QAT_HW_SHA384_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return ICP_QAT_HW_MD5_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
@@ -123,12 +142,18 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
switch (qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return SHA256_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return SHA512_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return SHA512_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
return 16;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return MD5_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum block size in this case */
return SHA512_CBLOCK;
@@ -150,6 +175,17 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
return 0;
}
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
SHA256_CTX ctx;
@@ -161,6 +197,17 @@ static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
return 0;
}
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
SHA512_CTX ctx;
@@ -172,6 +219,18 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
return 0;
}
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+ MD5_CTX ctx;
+
+ if (!MD5_Init(&ctx))
+ return -EFAULT;
+ MD5_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+ return 0;
+}
+
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
uint8_t *data_in,
uint8_t *data_out)
@@ -199,6 +258,13 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
*hash_state_out_be32 =
rte_bswap32(*(((uint32_t *)digest)+i));
break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (partial_hash_sha224(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
if (partial_hash_sha256(data_in, digest))
return -EFAULT;
@@ -206,6 +272,13 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
*hash_state_out_be32 =
rte_bswap32(*(((uint32_t *)digest)+i));
break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (partial_hash_sha384(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
if (partial_hash_sha512(data_in, digest))
return -EFAULT;
@@ -213,6 +286,10 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
*hash_state_out_be64 =
rte_bswap64(*(((uint64_t *)digest)+i));
break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (partial_hash_md5(data_in, data_out))
+ return -EFAULT;
+ break;
default:
PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
return -EFAULT;
@@ -344,7 +421,8 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
return 0;
}
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ uint16_t proto)
{
PMD_INIT_FUNC_TRACE();
header->hdr_flags =
@@ -358,7 +436,7 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
+ proto);
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
@@ -375,127 +453,121 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
- uint16_t cipher_offset = 0;
-
+ uint32_t total_key_size;
+ uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
+ uint16_t cipher_offset, cd_size;
+ uint32_t wordIndex = 0;
+ uint32_t *temp_key = NULL;
PMD_INIT_FUNC_TRACE();
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
- cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- cipher =
- (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_auth_algo_blk));
- cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
- } else {
- cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
- cipher_offset = 0;
- }
- /* CD setup */
- if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- } else {
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ return -EFAULT;
}
if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
- /* CTR Streaming ciphers are a special case. Decrypt = encrypt
+ /*
+ * CTR Streaming ciphers are a special case. Decrypt = encrypt
* Overriding default values previously set
*/
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
else
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- /* For Snow3G, set key convert and other bits */
if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- }
+ total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_padding_sz =
+ (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
+ total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
+ proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+ } else {
+ total_key_size = cipherkeylen;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
}
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
+ cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ header->service_cmd_id = cdesc->qat_cmd;
+ qat_alg_init_common_hdr(header, proto);
- cipher->aes.cipher_config.val =
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+
+ cipher->cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
cdesc->qat_dir);
- memcpy(cipher->aes.key, cipherkey, cipherkeylen);
- proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
- if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
- proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+ sizeof(struct icp_qat_hw_cipher_config)
+ + cipherkeylen);
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ memcpy(temp_key, cipherkey, cipherkeylen);
- /* Request template setup */
- qat_alg_init_common_hdr(header);
- header->service_cmd_id = cdesc->qat_cmd;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- /* Configure the common header protocol flags */
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+ /* XOR Key with KASUMI F8 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+ wordIndex++)
+ temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
- /* Cipher CD config setup */
- if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- cipher_cd_ctrl->cipher_key_sz =
- (ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
- cipher_cd_ctrl->cipher_state_sz =
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- }
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen + cipherkeylen;
} else {
- cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen;
}
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else {
- PMD_DRV_LOG(ERR, "invalid param, only authenticated "
- "encryption supported");
- return -EFAULT;
+ if (total_key_size > cipherkeylen) {
+ uint32_t padding_size = total_key_size-cipherkeylen;
+ if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+ /* K3 not provided, so set K3 = K1 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
+ else
+ memset(cdesc->cd_cur_ptr, 0, padding_size);
+ cdesc->cd_cur_ptr += padding_size;
}
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
return 0;
}
@@ -506,8 +578,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t digestsize,
unsigned int operation)
{
- struct icp_qat_hw_cipher_algo_blk *cipher;
- struct icp_qat_hw_auth_algo_blk *hash;
+ struct icp_qat_hw_auth_setup *hash;
struct icp_qat_hw_cipher_algo_blk *cipherconfig;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
@@ -519,98 +590,129 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
(struct icp_qat_fw_la_auth_req_params *)
((char *)&req_tmpl->serv_specif_rqpars +
sizeof(struct icp_qat_fw_la_cipher_req_params));
- enum icp_qat_hw_cipher_convert key_convert;
- uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
- uint16_t state1_size = 0;
- uint16_t state2_size = 0;
- uint16_t cipher_offset = 0, hash_offset = 0;
+ uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
+ uint16_t state1_size = 0, state2_size = 0;
+ uint16_t hash_offset, cd_size;
+ uint32_t *aad_len = NULL;
+ uint32_t wordIndex = 0;
+ uint32_t *pTempKey;
PMD_INIT_FUNC_TRACE();
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
- cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
- cipher =
- (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_auth_algo_blk));
- hash_offset = 0;
- cipher_offset = ((char *)hash - (char *)cipher);
- } else {
- cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
- hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_cipher_algo_blk));
- cipher_offset = 0;
- hash_offset = ((char *)hash - (char *)cipher);
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ return -EFAULT;
}
- /* CD setup */
- if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
} else {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
}
- if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
- /* CTR Streaming ciphers are a special case. Decrypt = encrypt
- * Overriding default values previously set
- */
- cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
- key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- else
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- cipher->aes.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
- cdesc->qat_cipher_alg, key_convert,
- cdesc->qat_dir);
-
- hash->sha.inner_setup.auth_config.reserved = 0;
- hash->sha.inner_setup.auth_config.config =
+ /*
+ * Setup the inner hash config
+ */
+ hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+ hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+ hash->auth_config.reserved = 0;
+ hash->auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
cdesc->qat_hash_alg, digestsize);
- hash->sha.inner_setup.auth_counter.counter =
- rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- hash->sha.inner_setup.auth_counter.counter = 0;
- hash->sha.outer_setup.auth_config.reserved = 0;
- cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
- ((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_auth_algo_blk)
- + 16);
- cipherconfig->aes.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
- ICP_QAT_HW_CIPHER_KEY_CONVERT,
- ICP_QAT_HW_CIPHER_ENCRYPT);
- memcpy(cipherconfig->aes.key, authkey, authkeylen);
- memset(cipherconfig->aes.key + authkeylen, 0,
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
- }
- /* Do precomputes */
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
- ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
+ hash->auth_counter.counter = 0;
+ else
+ hash->auth_counter.counter = rte_bswap32(
+ qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
+
+ /*
+ * cd_cur_ptr now points at the state1 information.
+ */
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
return -EFAULT;
}
- } else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
- (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ proto = ICP_QAT_FW_LA_GCM_PROTO;
+ state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
- ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
PMD_DRV_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
@@ -618,62 +720,76 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
* Write (the length of AAD) into bytes 16-19 of state2
* in big-endian format. This field is 8 bytes
*/
- uint32_t *aad_len = (uint32_t *)&hash->sha.state1[
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+ aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ];
+ ICP_QAT_HW_GALOIS_H_SZ);
*aad_len = rte_bswap32(add_auth_data_length);
-
- proto = ICP_QAT_FW_LA_GCM_PROTO;
- } else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
- state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
- } else {
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, (uint8_t *)(hash->sha.state1),
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
+ state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ (cdesc->cd_cur_ptr + state1_size + state2_size);
+ cipherconfig->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->key, authkey, authkeylen);
+ memset(cipherconfig->key + authkeylen,
+ 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ authkey, authkeylen, cdesc->cd_cur_ptr,
&state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ PMD_DRV_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
+ state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+ state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+ pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
+ + authkeylen);
+ /*
+ * The Inner Hash Initial State2 block must contain IK
+ * (Initialisation Key), followed by IK XOR-ed with KM
+ * (Key Modifier): IK||(IK^KM).
+ */
+ /* write the auth key */
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ /* initialise temp key with auth key */
+ memcpy(pTempKey, authkey, authkeylen);
+ /* XOR Key with KASUMI F9 key modifier at 4 bytes level */
+ for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+ pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
}
/* Request template setup */
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, proto);
header->service_cmd_id = cdesc->qat_cmd;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- /* Configure the common header protocol flags */
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
-
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- }
- if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
- }
-
- /* Cipher CD config setup */
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
-
- if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset>>3;
- } else {
- cipher_cd_ctrl->cipher_state_sz = 0;
- cipher_cd_ctrl->cipher_cfg_offset = 0;
- }
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
@@ -681,130 +797,21 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
hash_cd_ctrl->inner_res_sz = digestsize;
hash_cd_ctrl->final_sz = digestsize;
hash_cd_ctrl->inner_state1_sz = state1_size;
+ auth_param->auth_res_sz = digestsize;
- switch (cdesc->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- hash_cd_ctrl->inner_state2_sz =
- RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
- hash_cd_ctrl->inner_state2_sz =
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
- hash_cd_ctrl->inner_state1_sz =
- ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
- memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
- break;
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
- ICP_QAT_HW_GALOIS_LEN_A_SZ +
- ICP_QAT_HW_GALOIS_E_CTR0_SZ;
- hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- hash_cd_ctrl->inner_state2_sz =
- ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
- hash_cd_ctrl->inner_state1_sz =
- ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
- memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
- break;
- default:
- PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
- return -EFAULT;
- }
-
+ hash_cd_ctrl->inner_state2_sz = state2_size;
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
>> 3);
- auth_param->auth_res_sz = digestsize;
-
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else {
- PMD_DRV_LOG(ERR, "invalid param, only authenticated "
- "encryption supported");
- return -EFAULT;
- }
- return 0;
-}
-
-static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
- struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
-{
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
- struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
- PMD_INIT_FUNC_TRACE();
- rte_memcpy(cd->aes.key, key, keylen);
- qat_alg_init_common_hdr(header);
- header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
- cd_pars->u.s.content_desc_params_sz =
- sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
- /* Cipher CD config setup */
- cd_ctrl->cipher_key_sz = keylen >> 3;
- cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cd_ctrl->cipher_cfg_offset = 0;
- ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-}
+ cdesc->cd_cur_ptr += state1_size + state2_size;
+ cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
-void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
- int alg, const uint8_t *key,
- unsigned int keylen)
-{
- struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
- struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-
- PMD_INIT_FUNC_TRACE();
- qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
-}
-
-void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
- int alg, const uint8_t *key,
- unsigned int keylen)
-{
- struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
- struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
- PMD_INIT_FUNC_TRACE();
- qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+ return 0;
}
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
@@ -836,3 +843,28 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
}
return 0;
}
+
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_KASUMI_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case QAT_3DES_KEY_SZ_OPT1:
+ case QAT_3DES_KEY_SZ_OPT2:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
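The two validators added above are consumed by the session-setup path in qat_crypto.c (see the hunks that follow). A condensed, hypothetical caller, with key_length standing in for the user-supplied cipher key length, looks like this:

/* Map the supplied key length onto the QAT cipher algo enum,
 * rejecting anything other than the 16- or 24-byte 3DES options. */
enum icp_qat_hw_cipher_algo alg;

if (qat_alg_validate_3des_key(key_length, &alg) != 0)
	return -EINVAL;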
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index d51ca968..798cd982 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -54,7 +54,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
@@ -90,6 +89,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
{ /* SHA256 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -111,6 +131,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
{ /* SHA512 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -132,6 +173,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 8,
+ .max = 64,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
{ /* AES XCBC MAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -178,7 +240,32 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
- { /* SNOW3G (UIA2) */
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 1,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UIA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
@@ -243,7 +330,7 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
- { /* SNOW3G (UEA2) */
+ { /* SNOW 3G (UEA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
@@ -283,6 +370,132 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
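
The capability entries added above are what applications see through the cryptodev info query. A minimal sketch, assuming only the standard rte_cryptodev_info_get() API, of walking this table to confirm the new SHA224 HMAC entry (the helper name and the 28-byte digest check are illustrative, not part of the patch):

/*
 * Sketch only: iterate the advertised capabilities until the
 * RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() terminator is reached.
 */
#include <rte_cryptodev.h>

static int
device_supports_sha224_hmac(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(dev_id, &info);

	for (cap = info.capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH &&
				cap->sym.auth.algo == RTE_CRYPTO_AUTH_SHA224_HMAC &&
				cap->sym.auth.digest_size.min <= 28 &&
				cap->sym.auth.digest_size.max >= 28)
			return 1;
	}
	return 0;
}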
@@ -400,18 +613,46 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
+ PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
break;
case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_3DES_ECB:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_AES_CCM:
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
goto error_out;
default:
@@ -512,9 +753,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA1_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
break;
@@ -524,22 +771,28 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_GCM:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA512:
case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_MD5_HMAC:
case RTE_CRYPTO_AUTH_AES_CCM:
- case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
@@ -575,7 +828,8 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
return session;
error_out:
- rte_mempool_put(internals->sess_mp, session);
+ if (internals->sess_mp != NULL)
+ rte_mempool_put(internals->sess_mp, session);
return NULL;
}
@@ -697,6 +951,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint32_t digest_appended = 1;
+ uint64_t buf_start = 0;
+
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
@@ -719,87 +980,178 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- *qat_req = ctx->fw_req;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- rte_pktmbuf_data_len(op->sym->m_src);
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ do_auth = 1;
+ do_cipher = 1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(op->sym->m_src);
+ if (do_cipher) {
- if (unlikely(op->sym->m_dst != NULL)) {
- qat_req->comn_mid.dest_data_addr =
- rte_pktmbuf_mtophys(op->sym->m_dst);
- qat_req->comn_mid.dst_length =
- rte_pktmbuf_data_len(op->sym->m_dst);
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+
+ if (unlikely(
+ (cipher_param->cipher_length % BYTE_LENGTH != 0)
+ || (cipher_param->cipher_offset
+ % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ /* copy IV into request if it fits */
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ op->sym->cipher.iv.phys_addr;
+ }
+ min_ofs = cipher_ofs;
}
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
+ || (auth_param->auth_len % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ if (do_cipher) {
+ auth_len = auth_len + auth_ofs + 1 -
+ ICP_QAT_HW_KASUMI_BLK_SZ;
+ auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
+ } else {
+ auth_len = auth_len + auth_ofs + 1;
+ auth_ofs = 0;
+ }
+ }
- cipher_param->cipher_length = op->sym->cipher.data.length;
- cipher_param->cipher_offset = op->sym->cipher.data.offset;
- if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
- (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
- "supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+ }
+ min_ofs = auth_ofs;
+
+ if (op->sym->auth.digest.phys_addr) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+ digest_appended = 0;
}
- cipher_param->cipher_length >>= 3;
- cipher_param->cipher_offset >>= 3;
+
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+
}
- if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+
+ /* Start DMA at nearest aligned address below min_ofs */
+ #define QAT_64_BYTE_ALIGN_MASK (~0x3f)
+ buf_start = rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) &
+ QAT_64_BYTE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src)
+ - rte_pktmbuf_headroom(op->sym->m_src)) > buf_start)) {
+ /* alignment has pushed addr ahead of start of mbuf
+ * so revert and take the performance hit
+ */
+ buf_start = rte_pktmbuf_mtophys(op->sym->m_src);
+ }
+
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr = buf_start;
+
+ if (do_cipher) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, cipher_ofs) - buf_start;
+ cipher_param->cipher_length = cipher_len;
} else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
}
- if (op->sym->auth.digest.phys_addr) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+ if (do_auth) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, auth_ofs) - buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
}
- auth_param->auth_off = op->sym->auth.data.offset;
- auth_param->auth_len = op->sym->auth.data.length;
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
- (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
- "supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_param->auth_off >>= 3;
- auth_param->auth_len >>= 3;
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_auth && digest_appended) {
+ if (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE)
+ qat_req->comn_mid.dst_length
+ += op->sym->auth.digest.length;
+ else
+ qat_req->comn_mid.src_length
+ += op->sym->auth.digest.length;
}
- auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
- /* (GCM) aad length(240 max) will be at this location after precompute */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- struct icp_qat_hw_auth_algo_blk *hash;
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
- hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd);
+ /* out-of-place operation (OOP) */
+ if (unlikely(op->sym->m_dst != NULL)) {
+
+ if (do_auth)
+ qat_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ auth_ofs)
+ - auth_param->auth_off;
else
- hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd +
- sizeof(struct icp_qat_hw_cipher_algo_blk));
+ qat_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ cipher_ofs)
+ - cipher_param->cipher_offset;
+ }
- auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
- ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
if (op->sym->cipher.iv.length == 12) {
/*
* For GCM a 12 bit IV is allowed,
@@ -809,8 +1161,24 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
+ if (op->sym->cipher.data.length == 0) {
+ /*
+ * GMAC
+ */
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ op->sym->auth.aad.phys_addr;
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
+ cipher_param->cipher_length = 0;
+ cipher_param->cipher_offset = 0;
+ auth_param->u1.aad_adr = 0;
+ auth_param->auth_len = op->sym->auth.aad.length;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->u2.aad_sz = 0;
+ }
}
- auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
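
The request-builder rework above starts the DMA region at the lowest cipher/auth offset rounded down to a 64-byte boundary and re-expresses both offsets relative to that address. A standalone sketch of the alignment arithmetic, with hypothetical helper and parameter names, assuming data_phys is the physical address of the mbuf data and buf_phys_start the physical start of the underlying buffer:

/*
 * Sketch only, not part of the patch: round the address of the first byte
 * of interest down to a 64-byte boundary; if that steps in front of the
 * buffer, revert to the unaligned data address and take the performance hit.
 */
#include <stdint.h>

#define QAT_ALIGN_MASK (~(uint64_t)0x3f)

static uint64_t
qat_pick_buf_start(uint64_t data_phys, uint32_t min_ofs,
		uint64_t buf_phys_start)
{
	uint64_t start = (data_phys + min_ofs) & QAT_ALIGN_MASK;

	if (start < buf_phys_start)
		start = data_phys;

	return start;
}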
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 5de47e36..2e7188bd 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -300,7 +300,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
* Allocate a memzone for the queue - create a unique name.
*/
snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- dev->driver->pci_drv.name, "qp_mem", dev->data->dev_id,
+ dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
socket_id);
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 82ab047f..1e7ee61c 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -71,6 +71,12 @@ static struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x0443),
},
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
{.device_id = 0},
};
@@ -113,26 +119,16 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
}
static struct rte_cryptodev_driver rte_qat_pmd = {
- {
+ .pci_drv = {
.id_table = pci_id_qat_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_cryptodev_pci_probe,
+ .remove = rte_cryptodev_pci_remove,
},
.cryptodev_init = crypto_qat_dev_init,
.dev_private_size = sizeof(struct qat_pmd_private),
};
-static int
-rte_qat_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return rte_cryptodev_pmd_driver_register(&rte_qat_pmd, PMD_PDEV);
-}
-
-static struct rte_driver pmd_qat_drv = {
- .type = PMD_PDEV,
- .init = rte_qat_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(pmd_qat_drv, CRYPTODEV_NAME_QAT_SYM_PMD);
-DRIVER_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
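
With registration now going through the generic PCI probe/remove callbacks, QAT devices are bound during rte_eal_init(). A minimal sketch, assuming the 16.11-era device-type counter, of how an application could then confirm that at least one QAT symmetric crypto device was probed (helper name hypothetical):

/*
 * Sketch only, not part of the patch.
 */
#include <rte_cryptodev.h>

static int
qat_devices_present(void)
{
	return rte_cryptodev_count_devtype(RTE_CRYPTODEV_QAT_SYM_PMD) > 0;
}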
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index ec31de28..3b4292a6 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -35,7 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -545,7 +545,7 @@ snow3g_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
-static int cryptodev_snow3g_uninit(const char *name);
+static int cryptodev_snow3g_remove(const char *name);
static int
cryptodev_snow3g_create(const char *name,
@@ -599,12 +599,12 @@ cryptodev_snow3g_create(const char *name,
init_error:
SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed", name);
- cryptodev_snow3g_uninit(crypto_dev_name);
+ cryptodev_snow3g_remove(crypto_dev_name);
return -EFAULT;
}
static int
-cryptodev_snow3g_init(const char *name,
+cryptodev_snow3g_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -626,26 +626,26 @@ cryptodev_snow3g_init(const char *name,
}
static int
-cryptodev_snow3g_uninit(const char *name)
+cryptodev_snow3g_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing SNOW3G crypto device %s"
+ RTE_LOG(INFO, PMD, "Closing SNOW 3G crypto device %s"
" on numa socket %u\n",
name, rte_socket_id());
return 0;
}
-static struct rte_driver cryptodev_snow3g_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_snow3g_init,
- .uninit = cryptodev_snow3g_uninit
+static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
+ .probe = cryptodev_snow3g_probe,
+ .remove = cryptodev_snow3g_remove
};
-PMD_REGISTER_DRIVER(cryptodev_snow3g_pmd_drv, CRYPTODEV_NAME_SNOW3G_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index 6f00b066..4602dfd4 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -39,7 +39,7 @@
#include "rte_snow3g_pmd_private.h"
static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
- { /* SNOW3G (UIA2) */
+ { /* SNOW 3G (UIA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
@@ -64,7 +64,7 @@ static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
}, }
}, }
},
- { /* SNOW3G (UEA2) */
+ { /* SNOW 3G (UEA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
@@ -228,7 +228,7 @@ snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
snow3g_pmd_qp_release(dev, qp_id);
/* Allocate the queue pair data structure. */
- qp = rte_zmalloc_socket("SNOW3G PMD Queue Pair", sizeof(*qp),
+ qp = rte_zmalloc_socket("SNOW 3G PMD Queue Pair", sizeof(*qp),
RTE_CACHE_LINE_SIZE, socket_id);
if (qp == NULL)
return (-ENOMEM);
diff --git a/drivers/crypto/zuc/Makefile b/drivers/crypto/zuc/Makefile
new file mode 100644
index 00000000..b15eb0f6
--- /dev/null
+++ b/drivers/crypto/zuc/Makefile
@@ -0,0 +1,69 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_ZUC_PATH),)
+$(error "Please define LIBSSO_ZUC_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_zuc.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zuc_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_ZUC_PATH)
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
+LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/zuc/rte_pmd_zuc_version.map b/drivers/crypto/zuc/rte_pmd_zuc_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/drivers/crypto/zuc/rte_pmd_zuc_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
new file mode 100644
index 00000000..38491193
--- /dev/null
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -0,0 +1,550 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_zuc_pmd_private.h"
+
+#define ZUC_DIGEST_LENGTH 4
+#define ZUC_MAX_BURST 8
+#define BYTE_LEN 8
+
+/**
+ * Global static parameter used to create a unique name for each ZUC
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_ZUC_PMD),
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/** Get xform chain order. */
+static enum zuc_operation
+zuc_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->next)
+ if (xform->next->next != NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ZUC_OP_AUTH_CIPHER;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ZUC_OP_CIPHER_AUTH;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ return ZUC_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum zuc_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = zuc_get_mode(xform);
+
+ switch (mode) {
+ case ZUC_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case ZUC_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case ZUC_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case ZUC_OP_NOT_SUPPORTED:
+ default:
+ ZUC_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only ZUC EEA3 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -EINVAL;
+ /* Copy the key */
+ memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data, ZUC_IV_KEY_LENGTH);
+ }
+
+ if (auth_xform) {
+ /* Only ZUC EIA3 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Copy the key */
+ memcpy(sess->pKey_hash, auth_xform->auth.key.data, ZUC_IV_KEY_LENGTH);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get ZUC session. */
+static struct zuc_session *
+zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
+{
+ struct zuc_session *sess;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_ZUC_PMD))
+ return NULL;
+
+ sess = (struct zuc_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_sym_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct zuc_session *)c_sess->_private;
+
+ if (unlikely(zuc_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_zuc_cipher_op(struct rte_crypto_op **ops,
+ struct zuc_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
+ uint8_t *IV[ZUC_MAX_BURST];
+ uint32_t num_bytes[ZUC_MAX_BURST];
+ uint8_t *cipher_keys[ZUC_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (unlikely(ops[i]->sym->cipher.iv.length != ZUC_IV_KEY_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("iv");
+ break;
+ }
+
+ if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("Data Length or offset");
+ break;
+ }
+
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = ops[i]->sym->cipher.iv.data;
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ cipher_keys[i] = session->pKey_cipher;
+
+ processed_ops++;
+ }
+
+ sso_zuc_eea3_n_buffer(cipher_keys, IV, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_zuc_hash_op(struct rte_crypto_op **ops,
+ struct zuc_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src;
+ uint32_t *dst;
+ uint32_t length_in_bits;
+
+ for (i = 0; i < num_ops; i++) {
+ if (unlikely(ops[i]->sym->auth.aad.length != ZUC_IV_KEY_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("aad");
+ break;
+ }
+
+ if (unlikely(ops[i]->sym->auth.digest.length != ZUC_DIGEST_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("digest");
+ break;
+ }
+
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_zuc_eia3_1_buffer(session->pKey_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = (uint32_t *)ops[i]->sym->auth.digest.data;
+
+ sso_zuc_eia3_1_buffer(session->pKey_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which shares the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+ struct zuc_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case ZUC_OP_ONLY_CIPHER:
+ processed_ops = process_zuc_cipher_op(ops,
+ session, num_ops);
+ break;
+ case ZUC_OP_ONLY_AUTH:
+ processed_ops = process_zuc_hash_op(ops, session,
+ num_ops);
+ break;
+ case ZUC_OP_CIPHER_AUTH:
+ processed_ops = process_zuc_cipher_op(ops, session,
+ num_ops);
+ process_zuc_hash_op(ops, session, processed_ops);
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ processed_ops = process_zuc_hash_op(ops, session,
+ num_ops);
+ process_zuc_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+ struct zuc_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = zuc_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == ZUC_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct zuc_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_zuc_remove(const char *name);
+
+static int
+cryptodev_zuc_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct zuc_private *internals;
+ uint64_t cpu_flags = 0;
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ ZUC_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+
+ /* Create a unique device name. */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ ZUC_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct zuc_private), init_params->socket_id);
+ if (dev == NULL) {
+ ZUC_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_ZUC_PMD;
+ dev->dev_ops = rte_zuc_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = zuc_pmd_dequeue_burst;
+ dev->enqueue_burst = zuc_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed", name);
+
+ cryptodev_zuc_remove(crypto_dev_name);
+ return -EFAULT;
+}
+
+static int
+cryptodev_zuc_probe(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_zuc_create(name, &init_params);
+}
+
+static int
+cryptodev_zuc_remove(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ZUC crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
+ .probe = cryptodev_zuc_probe,
+ .remove = cryptodev_zuc_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
new file mode 100644
index 00000000..2c886d51
--- /dev/null
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_zuc_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+zuc_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+zuc_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zuc_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+zuc_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+zuc_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zuc_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+zuc_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct zuc_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zuc_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zuc_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct zuc_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "zuc_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n > sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ ZUC_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ZUC_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct zuc_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zuc_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZUC PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zuc_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = zuc_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+zuc_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the ZUC session structure */
+static unsigned
+zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct zuc_session);
+}
+
+/** Configure a ZUC session from a crypto xform chain */
+static void *
+zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ ZUC_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (zuc_set_session_parameters(sess, xform) != 0) {
+ ZUC_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+zuc_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently just resetting the whole data structure; need to investigate
+ * whether a more selective reset of the key would be more performant
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct zuc_session));
+}
+
+struct rte_cryptodev_ops zuc_pmd_ops = {
+ .dev_configure = zuc_pmd_config,
+ .dev_start = zuc_pmd_start,
+ .dev_stop = zuc_pmd_stop,
+ .dev_close = zuc_pmd_close,
+
+ .stats_get = zuc_pmd_stats_get,
+ .stats_reset = zuc_pmd_stats_reset,
+
+ .dev_infos_get = zuc_pmd_info_get,
+
+ .queue_pair_setup = zuc_pmd_qp_setup,
+ .queue_pair_release = zuc_pmd_qp_release,
+ .queue_pair_start = zuc_pmd_qp_start,
+ .queue_pair_stop = zuc_pmd_qp_stop,
+ .queue_pair_count = zuc_pmd_qp_count,
+
+ .session_get_size = zuc_pmd_session_get_size,
+ .session_configure = zuc_pmd_session_configure,
+ .session_clear = zuc_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
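
On the data path, the enqueue code in rte_zuc_pmd.c expects cipher/auth offsets and lengths in bits (byte aligned), the EEA3 IV in cipher.iv and the EIA3 IV carried as 16 bytes of AAD. A hedged sketch of filling one op accordingly; the helper name, zero offsets and buffer handling (rte_malloc-backed IV/digest) are assumptions for illustration only:

/*
 * Sketch only, not part of the patch.
 */
#include <rte_crypto.h>
#include <rte_malloc.h>

static void
prepare_zuc_op(struct rte_crypto_op *op, struct rte_mbuf *m,
		uint8_t *iv, uint8_t *digest, uint32_t len_bytes)
{
	struct rte_crypto_sym_op *sym = op->sym;

	sym->m_src = m;

	sym->cipher.data.offset = 0;		/* in bits, byte aligned */
	sym->cipher.data.length = len_bytes * 8;
	sym->cipher.iv.data = iv;
	sym->cipher.iv.phys_addr = rte_malloc_virt2phy(iv);
	sym->cipher.iv.length = 16;

	sym->auth.data.offset = 0;		/* in bits, byte aligned */
	sym->auth.data.length = len_bytes * 8;
	sym->auth.aad.data = iv;		/* EIA3 IV passed via AAD */
	sym->auth.aad.phys_addr = rte_malloc_virt2phy(iv);
	sym->auth.aad.length = 16;
	sym->auth.digest.data = digest;
	sym->auth.digest.phys_addr = rte_malloc_virt2phy(digest);
	sym->auth.digest.length = 4;
}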
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
new file mode 100644
index 00000000..030f120b
--- /dev/null
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ZUC_PMD_PRIVATE_H_
+#define _RTE_ZUC_PMD_PRIVATE_H_
+
+#include <sso_zuc.h>
+
+#define ZUC_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ZUC_DEBUG
+#define ZUC_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ZUC_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define ZUC_LOG_INFO(fmt, args...)
+#define ZUC_LOG_DBG(fmt, args...)
+#endif
+
+#define ZUC_IV_KEY_LENGTH 16
+/** private data structure for each virtual ZUC device */
+struct zuc_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** ZUC buffer queue pair */
+struct zuc_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum zuc_operation {
+ ZUC_OP_ONLY_CIPHER,
+ ZUC_OP_ONLY_AUTH,
+ ZUC_OP_CIPHER_AUTH,
+ ZUC_OP_AUTH_CIPHER,
+ ZUC_OP_NOT_SUPPORTED
+};
+
+/** ZUC private session structure */
+struct zuc_session {
+ enum zuc_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+ uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+} __rte_cache_aligned;
+
+
+extern int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
+
+
+
+#endif /* _RTE_ZUC_PMD_PRIVATE_H_ */
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index f7955662..ff450685 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -40,7 +40,7 @@
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
@@ -666,7 +666,7 @@ rte_pmd_init_internals(const char *name,
}
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ *eth_dev = rte_eth_dev_allocate(name);
if (*eth_dev == NULL)
goto error;
@@ -820,7 +820,7 @@ rte_eth_from_packet(const char *name,
}
static int
-rte_pmd_af_packet_devinit(const char *name, const char *params)
+rte_pmd_af_packet_probe(const char *name, const char *params)
{
unsigned numa_node;
int ret = 0;
@@ -858,7 +858,7 @@ exit:
}
static int
-rte_pmd_af_packet_devuninit(const char *name)
+rte_pmd_af_packet_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals;
@@ -889,14 +889,14 @@ rte_pmd_af_packet_devuninit(const char *name)
return 0;
}
-static struct rte_driver pmd_af_packet_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_af_packet_devinit,
- .uninit = rte_pmd_af_packet_devuninit,
+static struct rte_vdev_driver pmd_af_packet_drv = {
+ .probe = rte_pmd_af_packet_probe,
+ .remove = rte_pmd_af_packet_remove,
};
-PMD_REGISTER_DRIVER(pmd_af_packet_drv, eth_af_packet);
-DRIVER_REGISTER_PARAM_STRING(eth_af_packet,
+RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
+RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
+RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
"iface=<string> "
"qpairs=<int> "
"blocksz=<int> "
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index ab696801..e971fb66 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -28,7 +28,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += ecore_sp.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += elink.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c
-SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC) += debug.c
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_eal lib/librte_ether
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 8970334e..28566302 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -1397,10 +1397,10 @@ bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj,
return rc;
}
-int
+static int
bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
- unsigned long *rx_accept_flags,
- unsigned long *tx_accept_flags)
+ unsigned long *rx_accept_flags,
+ unsigned long *tx_accept_flags)
{
/* Clear the flags first */
*rx_accept_flags = 0;
@@ -7016,34 +7016,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
bnx2x_set_requested_fc(sc);
- if (CHIP_REV_IS_SLOW(sc)) {
- uint32_t bond = CHIP_BOND_ID(sc);
- uint32_t feat = 0;
-
- if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
- feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
- } else if (bond & 0x4) {
- if (CHIP_IS_E3(sc)) {
- feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
- } else {
- feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
- }
- } else if (bond & 0x8) {
- if (CHIP_IS_E3(sc)) {
- feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
- } else {
- feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
- }
- }
-
-/* disable EMAC for E3 and above */
- if (bond & 0x2) {
- feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
- }
-
- sc->link_params.feature_config_flags |= feat;
- }
-
if (load_mode == LOAD_DIAG) {
lp->loopback_mode = ELINK_LOOPBACK_XGXS;
/* Prefer doing PHY loopback at 10G speed, if possible */
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 78757a8d..5cefea43 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -17,6 +17,7 @@
#define __BNX2X_H__
#include <rte_byteorder.h>
+#include <rte_spinlock.h>
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#ifndef __LITTLE_ENDIAN
@@ -304,10 +305,7 @@ struct bnx2x_device_type {
/* TCP with Timestamp Option (32) + IPv6 (40) */
/* max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT 8
-/* FW uses 2 cache lines alignment for start packet and size */
-#define BNX2X_FW_RX_ALIGN_START (1 << BNX2X_RX_ALIGN_SHIFT)
-#define BNX2X_FW_RX_ALIGN_END (1 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT RTE_MAX(6, min(8, RTE_CACHE_LINE_SIZE_LOG2))
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
@@ -1031,12 +1029,13 @@ struct bnx2x_softc {
struct bnx2x_mac_ops mac_ops;
/* structures for VF mbox/response/bulletin */
- struct bnx2x_vf_mbx_msg *vf2pf_mbox;
- struct bnx2x_dma vf2pf_mbox_mapping;
- struct vf_acquire_resp_tlv acquire_resp;
+ struct bnx2x_vf_mbx_msg *vf2pf_mbox;
+ struct bnx2x_dma vf2pf_mbox_mapping;
+ struct vf_acquire_resp_tlv acquire_resp;
struct bnx2x_vf_bulletin *pf2vf_bulletin;
- struct bnx2x_dma pf2vf_bulletin_mapping;
- struct bnx2x_vf_bulletin old_bulletin;
+ struct bnx2x_dma pf2vf_bulletin_mapping;
+ struct bnx2x_vf_bulletin old_bulletin;
+ rte_spinlock_t vf2pf_lock;
int media;
@@ -1415,34 +1414,95 @@ struct bnx2x_func_init_params {
#define BAR1 2
#define BAR2 4
+static inline void
+bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
+{
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ (unsigned long)offset, val);
+ *((volatile uint8_t*)
+ ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+}
+
+static inline void
+bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
+{
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 2) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 16-bit write to 0x%08lx",
+ (unsigned long)offset);
+#endif
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x",
+ (unsigned long)offset, val);
+ *((volatile uint16_t*)
+ ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+}
+
+static inline void
+bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
+{
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
-uint8_t bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset);
-uint16_t bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset);
-uint32_t bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset);
+ if ((offset % 4) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 32-bit write to 0x%08lx",
+ (unsigned long)offset);
+#endif
-void bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val);
-void bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val);
-void bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val);
-#else
-#define bnx2x_reg_write8(sc, offset, val)\
- *((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ (unsigned long)offset, val);
+ *((volatile uint32_t*)
+ ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+}
+
+static inline uint8_t
+bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
+{
+ uint8_t val;
+
+ val = (uint8_t)(*((volatile uint8_t*)
+ ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ (unsigned long)offset, val);
+
+ return val;
+}
+
+static inline uint16_t
+bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
+{
+ uint16_t val;
-#define bnx2x_reg_write16(sc, offset, val)\
- *((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 2) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 16-bit read from 0x%08lx",
+ (unsigned long)offset);
+#endif
-#define bnx2x_reg_write32(sc, offset, val)\
- *((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val
+ val = (uint16_t)(*((volatile uint16_t*)
+ ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ (unsigned long)offset, val);
-#define bnx2x_reg_read8(sc, offset)\
- (*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)))
+ return val;
+}
-#define bnx2x_reg_read16(sc, offset)\
- (*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)))
+static inline uint32_t
+bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
+{
+ uint32_t val;
-#define bnx2x_reg_read32(sc, offset)\
- (*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)))
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if ((offset % 4) != 0)
+ PMD_DRV_LOG(NOTICE, "Unaligned 32-bit read from 0x%08lx",
+ (unsigned long)offset);
#endif
+ val = (uint32_t)(*((volatile uint32_t*)
+ ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ (unsigned long)offset, val);
+
+ return val;
+}
+
#define REG_ADDR(sc, offset) (((uint64_t)sc->bar[BAR0].base_addr) + (offset))
#define REG_RD8(sc, offset) bnx2x_reg_read8(sc, (offset))
@@ -1883,8 +1943,6 @@ int bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
int leading);
void bnx2x_free_hsi_mem(struct bnx2x_softc *sc);
int bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc);
-int bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
- unsigned long *rx_accept_flags, unsigned long *tx_accept_flags);
int bnx2x_check_bull(struct bnx2x_softc *sc);
//#define BNX2X_PULSE
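
Editor's note: the bnx2x.h hunk above folds the old debug.c helpers and the release-mode macros into one set of always-built static inline accessors that log each access and warn on misaligned 16/32-bit offsets when RTE_LIBRTE_BNX2X_DEBUG_PERIODIC is enabled. Below is a minimal standalone sketch of that accessor pattern; fake_bar, DEBUG_MMIO and the reg_* names are stand-ins invented for the sketch, with the BAR simulated by an ordinary buffer.

/* mmio_sketch.c - inline MMIO accessor pattern, simplified */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEBUG_MMIO 1                 /* stands in for the debug build option */

static uint8_t fake_bar[4096];       /* stands in for sc->bar[BAR0].base_addr */

static inline void
reg_write32(volatile void *base, size_t offset, uint32_t val)
{
#if DEBUG_MMIO
    if ((offset % 4) != 0)
        fprintf(stderr, "Unaligned 32-bit write to 0x%08lx\n",
                (unsigned long)offset);
    printf("write offset=0x%08lx val=0x%08x\n", (unsigned long)offset, val);
#endif
    *(volatile uint32_t *)((uintptr_t)base + offset) = val;
}

static inline uint32_t
reg_read32(volatile void *base, size_t offset)
{
    uint32_t val = *(volatile uint32_t *)((uintptr_t)base + offset);
#if DEBUG_MMIO
    printf("read  offset=0x%08lx val=0x%08x\n", (unsigned long)offset, val);
#endif
    return val;
}

int main(void)
{
    reg_write32(fake_bar, 0x10, 0xdeadbeefu);
    printf("read back: 0x%08x\n", reg_read32(fake_bar, 0x10));
    return 0;
}

In the driver the alignment check and the log call are compiled out when the debug option is off, so the inline accessors cost nothing in a release build while removing the duplicated debug.c implementations.
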
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index f3ab3550..0eae433f 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -203,8 +203,6 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
/* Print important adapter info for the user. */
bnx2x_print_adapter_info(sc);
- DELAY_MS(2500);
-
return ret;
}
@@ -577,6 +575,8 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
if (IS_VF(sc)) {
+ rte_spinlock_init(&sc->vf2pf_lock);
+
if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
&sc->vf2pf_mbox_mapping, "vf2pf_mbox",
RTE_CACHE_LINE_SIZE) != 0)
@@ -618,9 +618,10 @@ eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_bnx2x_pmd = {
.pci_drv = {
- .name = "rte_bnx2x_pmd",
.id_table = pci_id_bnx2x_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_bnx2x_dev_init,
.dev_private_size = sizeof(struct bnx2x_softc),
@@ -631,41 +632,16 @@ static struct eth_driver rte_bnx2x_pmd = {
*/
static struct eth_driver rte_bnx2xvf_pmd = {
.pci_drv = {
- .name = "rte_bnx2xvf_pmd",
.id_table = pci_id_bnx2xvf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_bnx2xvf_dev_init,
.dev_private_size = sizeof(struct bnx2x_softc),
};
-static int rte_bnx2x_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- rte_eth_driver_register(&rte_bnx2x_pmd);
-
- return 0;
-}
-
-static int rte_bnx2xvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- rte_eth_driver_register(&rte_bnx2xvf_pmd);
-
- return 0;
-}
-
-static struct rte_driver rte_bnx2x_driver = {
- .type = PMD_PDEV,
- .init = rte_bnx2x_pmd_init,
-};
-
-static struct rte_driver rte_bnx2xvf_driver = {
- .type = PMD_PDEV,
- .init = rte_bnx2xvf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_bnx2x_driver, bnx2x);
-DRIVER_REGISTER_PCI_TABLE(bnx2x, pci_id_bnx2x_map);
-PMD_REGISTER_DRIVER(rte_bnx2xvf_driver, bnx2xvf);
-DRIVER_REGISTER_PCI_TABLE(bnx2xvf, pci_id_bnx2xvf_map);
+RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
+RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
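
Editor's note: the bnx2x_ethdev.c hunk above drops the explicit rte_eth_driver_register() init hooks in favour of the 16.11-style RTE_PMD_REGISTER_PCI macros with .probe/.remove callbacks. The sketch below shows, in rough standalone form, the mechanism those macros rely on: constructor-based self-registration into a driver list. struct pci_driver, pci_register() and the my_* names are invented for the sketch and are not DPDK API.

/* pci_register_sketch.c - constructor-based driver registration, simplified */
#include <stdio.h>

struct pci_driver {
    const char *name;
    int (*probe)(void);
    int (*remove)(void);
    struct pci_driver *next;
};

static struct pci_driver *registry;

static void pci_register(struct pci_driver *drv)
{
    drv->next = registry;
    registry = drv;
}

static int my_probe(void)  { puts("probe: map BARs, init eth_dev"); return 0; }
static int my_remove(void) { puts("remove: release eth_dev");       return 0; }

static struct pci_driver my_pmd = {
    .name   = "net_sketch",
    .probe  = my_probe,
    .remove = my_remove,
};

/* runs before main(), like the macro-expanded registration in the diff */
__attribute__((constructor))
static void register_my_pmd(void)
{
    pci_register(&my_pmd);
}

int main(void)
{
    for (struct pci_driver *d = registry; d != NULL; d = d->next) {
        printf("found driver %s\n", d->name);
        d->probe();
        d->remove();
    }
    return 0;
}
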
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 0ec4f899..170e48fb 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -19,7 +19,8 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name, dev->data->port_id, queue_id);
+ dev->driver->pci_drv.driver.name, ring_name,
+ dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
if (mz)
@@ -59,7 +60,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
uint16_t j, idx;
@@ -84,7 +85,6 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->mb_pool = mp;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : ETHER_CRC_LEN);
rxq->nb_rx_pages = 1;
while (USABLE_RX_BD(rxq) < nb_desc)
@@ -94,13 +94,9 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
sc->rx_ring_size = USABLE_RX_BD(rxq);
rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
- rxq->rx_free_thresh = rx_conf->rx_free_thresh ?
- rx_conf->rx_free_thresh : DEFAULT_RX_FREE_THRESH;
-
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+ PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
"total_bd=%lu, rx_pages=%u, cq_pages=%u",
- queue_idx, nb_desc, rxq->rx_free_thresh,
- (unsigned long)USABLE_RX_BD(rxq),
+ queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
(unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
rxq->nb_cq_pages);
@@ -135,7 +131,6 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
/* Initialize software ring entries */
- rxq->rx_mbuf_alloc = 0;
for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) {
mbuf = rte_mbuf_raw_alloc(mp);
if (NULL == mbuf) {
@@ -146,7 +141,6 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
rxq->sw_ring[idx] = mbuf;
rxq->rx_ring[idx] = mbuf->buf_physaddr;
- rxq->rx_mbuf_alloc++;
}
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index ccb22fc1..dd251aaf 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -11,8 +11,6 @@
#ifndef _BNX2X_RXTX_H_
#define _BNX2X_RXTX_H_
-
-#define DEFAULT_RX_FREE_THRESH 0
#define DEFAULT_TX_FREE_THRESH 512
#define RTE_PMD_BNX2X_TX_MAX_BURST 1
@@ -42,13 +40,9 @@ struct bnx2x_rx_queue {
uint16_t rx_bd_tail; /**< Index of last rx bd. */
uint16_t rx_cq_head; /**< Index of current rcq bd. */
uint16_t rx_cq_tail; /**< Index of last rcq bd. */
- uint16_t nb_rx_hold; /**< number of held free RX desc. */
- uint16_t rx_free_thresh; /**< max free RX desc to hold. */
uint16_t queue_id; /**< RX queue index. */
uint8_t port_id; /**< Device port identifier. */
- uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
struct bnx2x_softc *sc; /**< Ptr to dev_private data. */
- uint64_t rx_mbuf_alloc; /**< Number of allocated mbufs. */
};
/**
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index 1c895f88..c47beb0e 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -64,25 +64,46 @@ bnx2x_check_bull(struct bnx2x_softc *sc)
return TRUE;
}
-/* add tlv to a buffer */
-#define BNX2X_TLV_APPEND(_tlvs, _offset, _type, _length) \
- ((struct vf_first_tlv *)((unsigned long)_tlvs + _offset))->type = _type; \
- ((struct vf_first_tlv *)((unsigned long)_tlvs + _offset))->length = _length
+/* place a given tlv on the tlv buffer at a given offset */
+static void
+bnx2x_add_tlv(__rte_unused struct bnx2x_softc *sc, void *tlvs_list,
+ uint16_t offset, uint16_t type, uint16_t length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)
+ ((unsigned long)tlvs_list + offset);
+
+ tl->type = type;
+ tl->length = length;
+}
/* Initialize header of the first tlv and clear mailbox */
static void
-bnx2x_init_first_tlv(struct bnx2x_softc *sc, struct vf_first_tlv *tlv,
- uint16_t type, uint16_t len)
+bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
+ uint16_t type, uint16_t length)
{
struct bnx2x_vf_mbx_msg *mbox = sc->vf2pf_mbox;
+
+ rte_spinlock_lock(&sc->vf2pf_lock);
+
PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_TLV_APPEND(tlv, 0, type, len);
+ bnx2x_add_tlv(sc, &first_tlv->tl, 0, type, length);
/* Initialize header of the first tlv */
- tlv->reply_offset = sizeof(mbox->query);
+ first_tlv->reply_offset = sizeof(mbox->query);
+}
+
+/* releases the mailbox */
+static void
+bnx2x_vf_finalize(struct bnx2x_softc *sc,
+ __rte_unused struct vf_first_tlv *first_tlv)
+{
+ PMD_DRV_LOG(DEBUG, "done sending [%d] tlv over vf pf channel",
+ first_tlv->tl.type);
+
+ rte_spinlock_unlock(&sc->vf2pf_lock);
}
#define BNX2X_VF_CMD_ADDR_LO PXP_VF_ADDR_CSDM_GLOBAL_START
@@ -97,39 +118,36 @@ bnx2x_do_req4pf(struct bnx2x_softc *sc, phys_addr_t phys_addr)
uint8_t *status = &sc->vf2pf_mbox->resp.common_reply.status;
uint8_t i;
- if (!*status) {
- bnx2x_check_bull(sc);
- if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
- PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
- *status = BNX2X_VF_STATUS_SUCCESS;
- return 0;
- }
+ if (*status) {
+ PMD_DRV_LOG(ERR, "status should be zero before message"
+ " to pf was sent");
+ return -EINVAL;
+ }
- REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
- REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));
+ bnx2x_check_bull(sc);
+ if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
+ PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
+ return -EINVAL;
+ }
- /* memory barrier to ensure that FW can read phys_addr */
- wmb();
+ REG_WR(sc, BNX2X_VF_CMD_ADDR_LO, U64_LO(phys_addr));
+ REG_WR(sc, BNX2X_VF_CMD_ADDR_HI, U64_HI(phys_addr));
- REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);
+ /* memory barrier to ensure that FW can read phys_addr */
+ wmb();
- /* Do several attempts until PF completes
- * "." is used to show progress
- */
- for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) {
- DELAY_MS(BNX2X_VF_CHANNEL_DELAY);
- if (*status)
- break;
- }
+ REG_WR8(sc, BNX2X_VF_CMD_TRIGGER, 1);
- if (!*status) {
- PMD_DRV_LOG(ERR, "Response from PF timed out");
- return -EAGAIN;
- }
- } else {
- PMD_DRV_LOG(ERR, "status should be zero before message"
- "to pf was sent");
- return -EINVAL;
+ /* Do several attempts until PF completes */
+ for (i = 0; i < BNX2X_VF_CHANNEL_TRIES; i++) {
+ DELAY_MS(BNX2X_VF_CHANNEL_DELAY);
+ if (*status)
+ break;
+ }
+
+ if (!*status) {
+ PMD_DRV_LOG(ERR, "Response from PF timed out");
+ return -EAGAIN;
}
PMD_DRV_LOG(DEBUG, "Response from PF was received");
@@ -168,31 +186,23 @@ static inline int bnx2x_read_vf_id(struct bnx2x_softc *sc)
#define BNX2X_VF_OBTAIN_MAC_FILTERS 1
#define BNX2X_VF_OBTAIN_MC_FILTERS 10
-struct bnx2x_obtain_status {
- int success;
- int err_code;
-};
-
static
-struct bnx2x_obtain_status bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
+int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
{
- int tries = 0;
struct vf_acquire_resp_tlv *resp = &sc->vf2pf_mbox->resp.acquire_resp,
- *sc_resp = &sc->acquire_resp;
- struct vf_resource_query *res_query;
- struct vf_resc *resc;
- struct bnx2x_obtain_status status;
+ *sc_resp = &sc->acquire_resp;
+ struct vf_resource_query *res_query;
+ struct vf_resc *resc;
int res_obtained = false;
+ int tries = 0;
+ int rc;
do {
PMD_DRV_LOG(DEBUG, "trying to get resources");
- if (bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr)) {
- /* timeout */
- status.success = 0;
- status.err_code = -EAGAIN;
- return status;
- }
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ return rc;
memcpy(sc_resp, resp, sizeof(sc->acquire_resp));
@@ -203,12 +213,12 @@ struct bnx2x_obtain_status bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
PMD_DRV_LOG(DEBUG, "resources obtained successfully");
res_obtained = true;
} else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
- tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
+ tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
PMD_DRV_LOG(DEBUG,
"PF cannot allocate requested amount of resources");
res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
- resc = &sc_resp->resc;
+ resc = &sc_resp->resc;
/* PF refused our request. Try to decrease request params */
res_query->num_txqs = min(res_query->num_txqs, resc->num_txqs);
@@ -220,30 +230,30 @@ struct bnx2x_obtain_status bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
} else {
- PMD_DRV_LOG(ERR, "Resources cannot be obtained. Status of handling: %d. Aborting",
- sc_resp->status);
- status.success = 0;
- status.err_code = -EAGAIN;
- return status;
+ PMD_DRV_LOG(ERR, "Failed to get the requested "
+ "amount of resources: %d.",
+ sc_resp->status);
+ return -EINVAL;
}
} while (!res_obtained);
- status.success = 1;
- return status;
+ return 0;
}
int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_count)
{
struct vf_acquire_tlv *acq = &sc->vf2pf_mbox->query[0].acquire;
int vf_id;
- struct bnx2x_obtain_status obtain_status;
+ int rc;
bnx2x_vf_close(sc);
- bnx2x_init_first_tlv(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));
+ bnx2x_vf_prep(sc, &acq->first_tlv, BNX2X_VF_TLV_ACQUIRE, sizeof(*acq));
vf_id = bnx2x_read_vf_id(sc);
- if (vf_id < 0)
- return -EAGAIN;
+ if (vf_id < 0) {
+ rc = -EAGAIN;
+ goto out;
+ }
acq->vf_id = vf_id;
@@ -256,19 +266,19 @@ int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_
acq->bulletin_addr = sc->pf2vf_bulletin_mapping.paddr;
/* Request physical port identifier */
- BNX2X_TLV_APPEND(acq, acq->first_tlv.length,
- BNX2X_VF_TLV_PHYS_PORT_ID,
- sizeof(struct channel_tlv));
+ bnx2x_add_tlv(sc, acq, acq->first_tlv.tl.length,
+ BNX2X_VF_TLV_PHYS_PORT_ID,
+ sizeof(struct channel_tlv));
- BNX2X_TLV_APPEND(acq,
- (acq->first_tlv.length + sizeof(struct channel_tlv)),
- BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, acq,
+ (acq->first_tlv.tl.length + sizeof(struct channel_tlv)),
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
/* requesting the resources in loop */
- obtain_status = bnx2x_loop_obtain_resources(sc);
- if (!obtain_status.success)
- return obtain_status.err_code;
+ rc = bnx2x_loop_obtain_resources(sc);
+ if (rc)
+ goto out;
struct vf_acquire_resp_tlv sc_resp = sc->acquire_resp;
@@ -299,7 +309,10 @@ int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_
else
eth_random_addr(sc->link_params.mac_addr);
- return 0;
+out:
+ bnx2x_vf_finalize(sc, &acq->first_tlv);
+
+ return rc;
}
/* Ask PF to release VF's resources */
@@ -309,19 +322,23 @@ bnx2x_vf_close(struct bnx2x_softc *sc)
struct vf_release_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
int vf_id = bnx2x_read_vf_id(sc);
+ int rc;
if (vf_id >= 0) {
query = &sc->vf2pf_mbox->query[0].release;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
- sizeof(*query));
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_RELEASE,
+ sizeof(*query));
query->vf_id = vf_id;
- BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
- if (reply->status != BNX2X_VF_STATUS_SUCCESS)
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to release VF");
+
+ bnx2x_vf_finalize(sc, &query->first_tlv);
}
}
@@ -331,11 +348,11 @@ bnx2x_vf_init(struct bnx2x_softc *sc)
{
struct vf_init_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
- int i;
+ int i, rc;
query = &sc->vf2pf_mbox->query[0].init;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
- sizeof(*query));
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_INIT,
+ sizeof(*query));
FOR_EACH_QUEUE(sc, i) {
query->sb_addr[i] = (unsigned long)(sc->fp[i].sb_dma.paddr);
@@ -345,17 +362,23 @@ bnx2x_vf_init(struct bnx2x_softc *sc)
query->stats_addr = sc->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
- BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to init VF");
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
PMD_DRV_LOG(DEBUG, "VF was initialized");
- return 0;
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+ return rc;
}
void
@@ -364,44 +387,49 @@ bnx2x_vf_unload(struct bnx2x_softc *sc)
struct vf_close_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
struct vf_q_op_tlv *query_op;
- int i, vf_id;
+ int i, vf_id, rc;
vf_id = bnx2x_read_vf_id(sc);
if (vf_id > 0) {
FOR_EACH_QUEUE(sc, i) {
query_op = &sc->vf2pf_mbox->query[0].q_op;
- bnx2x_init_first_tlv(sc, &query_op->first_tlv,
- BNX2X_VF_TLV_TEARDOWN_Q,
- sizeof(*query_op));
+ bnx2x_vf_prep(sc, &query_op->first_tlv,
+ BNX2X_VF_TLV_TEARDOWN_Q,
+ sizeof(*query_op));
query_op->vf_qid = i;
- BNX2X_TLV_APPEND(query_op, query_op->first_tlv.length,
- BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query_op,
+ query_op->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
- if (reply->status != BNX2X_VF_STATUS_SUCCESS)
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR,
"Bad reply for vf_q %d teardown", i);
+
+ bnx2x_vf_finalize(sc, &query_op->first_tlv);
}
bnx2x_vf_set_mac(sc, false);
query = &sc->vf2pf_mbox->query[0].close;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
- sizeof(*query));
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_CLOSE,
+ sizeof(*query));
query->vf_id = vf_id;
- BNX2X_TLV_APPEND(query, query->first_tlv.length,
- BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
- if (reply->status != BNX2X_VF_STATUS_SUCCESS)
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
PMD_DRV_LOG(ERR,
"Bad reply from PF for close message");
+
+ bnx2x_vf_finalize(sc, &query->first_tlv);
}
}
@@ -466,10 +494,11 @@ bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int lead
struct vf_setup_q_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
uint16_t flags = bnx2x_vf_q_flags(leading);
+ int rc;
query = &sc->vf2pf_mbox->query[0].setup_q;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q,
- sizeof(*query));
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SETUP_Q,
+ sizeof(*query));
query->vf_qid = fp->index;
query->param_valid = VF_RXQ_VALID | VF_TXQ_VALID;
@@ -477,17 +506,22 @@ bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int lead
bnx2x_vf_rx_q_prep(sc, fp, &query->rxq, flags);
bnx2x_vf_tx_q_prep(sc, fp, &query->txq, flags);
- BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]",
fp->index);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
- return 0;
+ return rc;
}
int
@@ -495,9 +529,10 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
{
struct vf_set_q_filters_tlv *query;
struct vf_common_reply_tlv *reply;
+ int rc;
query = &sc->vf2pf_mbox->query[0].set_q_filters;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
sizeof(*query));
query->vf_qid = sc->fp->index;
@@ -511,10 +546,13 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
rte_memcpy(query->filters[0].mac, sc->link_params.mac_addr, ETH_ALEN);
- BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
reply = &sc->vf2pf_mbox->resp.common_reply;
while (BNX2X_VF_STATUS_FAILURE == reply->status &&
@@ -525,16 +563,20 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
rte_memcpy(query->filters[0].mac, sc->pf2vf_bulletin->mac,
ETH_ALEN);
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
}
if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d",
reply->status);
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
- return 0;
+ return rc;
}
int
@@ -543,15 +585,17 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc,
{
struct vf_rss_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
+ int rc;
query = &sc->vf2pf_mbox->query[0].update_rss;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS,
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_UPDATE_RSS,
sizeof(*query));
/* add list termination tlv */
- BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
rte_memcpy(query->rss_key, params->rss_key, sizeof(params->rss_key));
query->rss_key_size = T_ETH_RSS_KEY;
@@ -562,13 +606,18 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc,
query->rss_result_mask = params->rss_result_mask;
query->rss_flags = params->rss_flags;
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
+
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to configure RSS");
- return -EINVAL;
+ rc = -EINVAL;
}
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
- return 0;
+ return rc;
}
int
@@ -576,27 +625,55 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
{
struct vf_set_q_filters_tlv *query;
struct vf_common_reply_tlv *reply = &sc->vf2pf_mbox->resp.common_reply;
- unsigned long tx_mask;
+ int rc;
query = &sc->vf2pf_mbox->query[0].set_q_filters;
- bnx2x_init_first_tlv(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
+ bnx2x_vf_prep(sc, &query->first_tlv, BNX2X_VF_TLV_SET_Q_FILTERS,
sizeof(*query));
query->vf_qid = 0;
query->flags = BNX2X_VF_RX_MASK_CHANGED;
- if (bnx2x_fill_accept_flags(sc, sc->rx_mode, &query->rx_mask, &tx_mask)) {
- return -EINVAL;
+ switch (sc->rx_mode) {
+ case BNX2X_RX_MODE_NONE: /* no Rx */
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
+ break;
+ case BNX2X_RX_MODE_NORMAL:
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_ALLMULTI:
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ case BNX2X_RX_MODE_PROMISC:
+ query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
+ query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "BAD rx mode (%d)", sc->rx_mode);
+ rc = -EINVAL;
+ goto out;
}
- BNX2X_TLV_APPEND(query, query->first_tlv.length, BNX2X_VF_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ bnx2x_add_tlv(sc, query, query->first_tlv.tl.length,
+ BNX2X_VF_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
+ if (rc)
+ goto out;
- bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to set RX mode");
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+out:
+ bnx2x_vf_finalize(sc, &query->first_tlv);
+
+ return rc;
}
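
Editor's note: the bnx2x_vfpf.c changes above wrap every VF-to-PF request in a prep/finalize pair that takes the new vf2pf_lock, and make bnx2x_do_req4pf() propagate errors instead of silently succeeding. A compressed standalone sketch of that flow follows, assuming a pthread mutex stands in for rte_spinlock_t and fill_reply() stands in for the PF answering through the doorbell; none of these names exist in the driver.

/* vf_mbox_sketch.c - prep / request / finalize mailbox flow, simplified */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct channel_tlv { uint16_t type; uint16_t length; };

struct mbox {
    struct channel_tlv first;    /* request header */
    uint8_t status;              /* reply, written by the "PF" */
};

static struct mbox mbox;
static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;

static void vf_prep(uint16_t type, uint16_t len)
{
    pthread_mutex_lock(&mbox_lock);       /* serialize mailbox users */
    memset(&mbox, 0, sizeof(mbox));
    mbox.first.type = type;
    mbox.first.length = len;
}

static void vf_finalize(void)
{
    pthread_mutex_unlock(&mbox_lock);
}

static void fill_reply(void) { mbox.status = 1; }   /* pretend the PF answered */

static int do_req4pf(void)
{
    int tries;

    if (mbox.status)                      /* must be clear before sending */
        return -EINVAL;
    fill_reply();                         /* real code rings a doorbell here */
    for (tries = 0; tries < 100; tries++)
        if (mbox.status)
            return 0;
    return -EAGAIN;                       /* PF never answered */
}

int main(void)
{
    int rc;

    vf_prep(1 /* e.g. ACQUIRE */, sizeof(mbox.first));
    rc = do_req4pf();
    if (rc == 0 && mbox.status != 1)
        rc = -EINVAL;
    vf_finalize();
    printf("request rc=%d status=%u\n", rc, mbox.status);
    return rc ? 1 : 0;
}

Holding the lock from prep to finalize is what lets the different control-path callers above (acquire, init, set MAC, RSS config, rx-mode) share the single mailbox safely, while the goto-out pattern keeps the unlock on every error path.
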
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h
index f854d81b..955ea982 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/bnx2x/bnx2x_vfpf.h
@@ -40,6 +40,13 @@ struct vf_resource_query {
#define TLV_BUFFER_SIZE 1024
+#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
+#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
+#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
+#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
+#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
+#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
+
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
uint16_t type;
@@ -47,8 +54,7 @@ struct channel_tlv {
};
struct vf_first_tlv {
- uint16_t type;
- uint16_t length;
+ struct channel_tlv tl;
uint32_t reply_offset;
};
@@ -58,16 +64,14 @@ struct tlv_buffer_size {
/* tlv struct for all PF replies except acquire */
struct vf_common_reply_tlv {
- uint16_t type;
- uint16_t length;
+ struct channel_tlv tl;
uint8_t status;
uint8_t pad[3];
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
- uint16_t type;
- uint16_t length;
+ struct channel_tlv tl;
uint32_t pad;
};
@@ -327,7 +331,6 @@ struct bnx2x_vf_mbx_msg {
union resp_tlvs resp;
};
-void bnx2x_add_tlv(void *tlvs_list, uint16_t offset, uint16_t type, uint16_t length);
int bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set);
int bnx2x_vf_config_rss(struct bnx2x_softc *sc, struct ecore_config_rss_params *params);
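
Editor's note: the header change above replaces the duplicated type/length pairs with an embedded struct channel_tlv, so one helper can place any TLV header at a byte offset inside the request buffer. A small standalone sketch of that layout is below; the buffer size, the type values and the add_tlv() helper are illustrative only.

/* tlv_sketch.c - laying out a TLV chain in a flat request buffer */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct channel_tlv  { uint16_t type; uint16_t length; };
struct first_tlv    { struct channel_tlv tl; uint32_t reply_offset; };
struct list_end_tlv { struct channel_tlv tl; uint32_t pad; };

static void add_tlv(void *buf, uint16_t off, uint16_t type, uint16_t len)
{
    struct channel_tlv *tl = (struct channel_tlv *)((uint8_t *)buf + off);

    tl->type = type;
    tl->length = len;
}

int main(void)
{
    union { uint8_t raw[64]; uint32_t align; } buf;   /* keep it 4-byte aligned */
    struct first_tlv *req = (struct first_tlv *)buf.raw;

    memset(buf.raw, 0, sizeof(buf.raw));
    /* first TLV at offset 0, list terminator placed right behind it */
    add_tlv(buf.raw, 0, 1 /* e.g. ACQUIRE */, sizeof(*req));
    add_tlv(buf.raw, req->tl.length, 2 /* LIST_END */,
            sizeof(struct list_end_tlv));

    printf("first tlv: type=%u length=%u; list end at offset %u\n",
           req->tl.type, req->tl.length, req->tl.length);
    return 0;
}
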
diff --git a/drivers/net/bnx2x/debug.c b/drivers/net/bnx2x/debug.c
deleted file mode 100644
index cc50845c..00000000
--- a/drivers/net/bnx2x/debug.c
+++ /dev/null
@@ -1,96 +0,0 @@
-/*-
- * Copyright (c) 2007-2013 QLogic Corporation. All rights reserved.
- *
- * Eric Davis <edavis@broadcom.com>
- * David Christensen <davidch@broadcom.com>
- * Gary Zambrano <zambrano@broadcom.com>
- *
- * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
- * Copyright (c) 2015 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.bnx2x_pmd for copyright and licensing details.
- */
-
-#include "bnx2x.h"
-
-
-/*
- * Debug versions of the 8/16/32 bit OS register read/write functions to
- * capture/display values read/written from/to the controller.
- */
-void
-bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
-{
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x", (unsigned long)offset, val);
- *((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
-}
-
-void
-bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
-{
- if ((offset % 2) != 0) {
- PMD_DRV_LOG(NOTICE, "Unaligned 16-bit write to 0x%08lx",
- (unsigned long)offset);
- }
-
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x", (unsigned long)offset, val);
- *((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
-}
-
-void
-bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
-{
- if ((offset % 4) != 0) {
- PMD_DRV_LOG(NOTICE, "Unaligned 32-bit write to 0x%08lx",
- (unsigned long)offset);
- }
-
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
- *((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
-}
-
-uint8_t
-bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
-{
- uint8_t val;
-
- val = (uint8_t)(*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x", (unsigned long)offset, val);
-
- return val;
-}
-
-uint16_t
-bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
-{
- uint16_t val;
-
- if ((offset % 2) != 0) {
- PMD_DRV_LOG(NOTICE, "Unaligned 16-bit read from 0x%08lx",
- (unsigned long)offset);
- }
-
- val = (uint16_t)(*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
-
- return val;
-}
-
-uint32_t
-bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
-{
- uint32_t val;
-
- if ((offset % 4) != 0) {
- PMD_DRV_LOG(NOTICE, "Unaligned 32-bit read from 0x%08lx",
- (unsigned long)offset);
- return 0;
- }
-
- val = (uint32_t)(*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
-
- return val;
-}
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index d9a72f0a..53293962 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -1586,26 +1586,6 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
/* enable emac and not bmac */
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);
-#ifdef ELINK_INCLUDE_EMUL
- /* for paladium */
- if (CHIP_REV_IS_EMUL(sc)) {
- /* Use lane 1 (of lanes 0-3) */
- REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
- REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
- }
- /* for fpga */
- else
-#endif
-#ifdef ELINK_INCLUDE_FPGA
- if (CHIP_REV_IS_FPGA(sc)) {
- /* Use lane 1 (of lanes 0-3) */
- PMD_DRV_LOG(DEBUG, "elink_emac_enable: Setting FPGA");
-
- REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
- REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0);
- } else
-#endif
- /* ASIC */
if (vars->phy_flags & PHY_XGXS_FLAG) {
uint32_t ser_lane = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
@@ -1628,39 +1608,28 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
-#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
- if (CHIP_REV_IS_SLOW(sc)) {
- /* config GMII mode */
- val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
- elink_cb_reg_write(sc, emac_base + EMAC_REG_EMAC_MODE,
- (val | EMAC_MODE_PORT_GMII));
- } else { /* ASIC */
-#endif
- /* pause enable/disable */
- elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
+ /* pause enable/disable */
+ elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
- elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- if (!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
- if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
- elink_bits_en(sc, emac_base +
- EMAC_REG_EMAC_RX_MODE,
- EMAC_RX_MODE_FLOW_EN);
-
- if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
- elink_bits_en(sc, emac_base +
- EMAC_REG_EMAC_TX_MODE,
- (EMAC_TX_MODE_EXT_PAUSE_EN |
- EMAC_TX_MODE_FLOW_EN));
- } else
- elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
- EMAC_TX_MODE_FLOW_EN);
-#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
- }
-#endif
+ elink_bits_dis(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ if (!(params->feature_config_flags &
+ ELINK_FEATURE_CONFIG_PFC_ENABLED)) {
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_RX)
+ elink_bits_en(sc, emac_base +
+ EMAC_REG_EMAC_RX_MODE,
+ EMAC_RX_MODE_FLOW_EN);
+
+ if (vars->flow_ctrl & ELINK_FLOW_CTRL_TX)
+ elink_bits_en(sc, emac_base +
+ EMAC_REG_EMAC_TX_MODE,
+ (EMAC_TX_MODE_EXT_PAUSE_EN |
+ EMAC_TX_MODE_FLOW_EN));
+ } else
+ elink_bits_en(sc, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_FLOW_EN);
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(sc, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1727,17 +1696,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
REG_WR(sc, NIG_REG_EMAC0_PAUSE_OUT_EN + port * 4, val);
REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0x1);
-#ifdef ELINK_INCLUDE_EMUL
- if (CHIP_REV_IS_EMUL(sc)) {
- /* Take the BigMac out of reset */
- REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
- (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-
- /* Enable access for bmac registers */
- REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x1);
- } else
-#endif
- REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x0);
+ REG_WR(sc, NIG_REG_BMAC0_REGS_OUT_EN + port * 4, 0x0);
vars->mac_type = ELINK_MAC_TYPE_EMAC;
return ELINK_STATUS_OK;
@@ -2137,15 +2096,6 @@ static elink_status_t elink_bmac1_enable(struct elink_params *params,
wb_data[1] = 0;
REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
wb_data, 2);
-#ifdef ELINK_INCLUDE_EMUL
- /* Fix for emulation */
- if (CHIP_REV_IS_EMUL(sc)) {
- wb_data[0] = 0xf000;
- wb_data[1] = 0;
- REG_WR_DMAE(sc, bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
- wb_data, 2);
- }
-#endif
return ELINK_STATUS_OK;
}
@@ -5922,11 +5872,6 @@ elink_status_t elink_set_led(struct elink_params *params,
params, mode);
}
}
-#ifdef ELINK_INCLUDE_EMUL
- if (params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC)
- return rc;
-#endif
switch (mode) {
case ELINK_LED_MODE_FRONT_PANEL_OFF:
@@ -11671,10 +11616,7 @@ elink_status_t elink_phy_probe(struct elink_params * params)
struct elink_phy *phy;
params->num_phys = 0;
PMD_DRV_LOG(DEBUG, "Begin phy probe");
-#ifdef ELINK_INCLUDE_EMUL
- if (CHIP_REV_IS_EMUL(sc))
- return ELINK_STATUS_OK;
-#endif
+
phy_config_swapped = params->multi_phy_config &
PORT_HW_CFG_PHY_SWAPPED_ENABLED;
@@ -11739,182 +11681,6 @@ elink_status_t elink_phy_probe(struct elink_params * params)
return ELINK_STATUS_OK;
}
-#ifdef ELINK_INCLUDE_EMUL
-static elink_status_t elink_init_e3_emul_mac(struct elink_params *params,
- struct elink_vars *vars)
-{
- struct bnx2x_softc *sc = params->sc;
- vars->line_speed = params->req_line_speed[0];
- /* In case link speed is auto, set speed the highest as possible */
- if (params->req_line_speed[0] == ELINK_SPEED_AUTO_NEG) {
- if (params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC)
- vars->line_speed = ELINK_SPEED_2500;
- else if (elink_is_4_port_mode(sc))
- vars->line_speed = ELINK_SPEED_10000;
- else
- vars->line_speed = ELINK_SPEED_20000;
- }
- if (vars->line_speed < ELINK_SPEED_10000) {
- if ((params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC)) {
- PMD_DRV_LOG(DEBUG, "Invalid line speed %d while UMAC is"
- " disabled!", params->req_line_speed[0]);
- return ELINK_STATUS_ERROR;
- }
- switch (vars->line_speed) {
- case ELINK_SPEED_10:
- vars->link_status = ELINK_LINK_10TFD;
- break;
- case ELINK_SPEED_100:
- vars->link_status = ELINK_LINK_100TXFD;
- break;
- case ELINK_SPEED_1000:
- vars->link_status = ELINK_LINK_1000TFD;
- break;
- case ELINK_SPEED_2500:
- vars->link_status = ELINK_LINK_2500TFD;
- break;
- default:
- PMD_DRV_LOG(DEBUG, "Invalid line speed %d for UMAC",
- vars->line_speed);
- return ELINK_STATUS_ERROR;
- }
- vars->link_status |= LINK_STATUS_LINK_UP;
-
- if (params->loopback_mode == ELINK_LOOPBACK_UMAC)
- elink_umac_enable(params, vars, 1);
- else
- elink_umac_enable(params, vars, 0);
- } else {
- /* Link speed >= 10000 requires XMAC enabled */
- if (params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC) {
- PMD_DRV_LOG(DEBUG, "Invalid line speed %d while XMAC is"
- " disabled!", params->req_line_speed[0]);
- return ELINK_STATUS_ERROR;
- }
- /* Check link speed */
- switch (vars->line_speed) {
- case ELINK_SPEED_10000:
- vars->link_status = ELINK_LINK_10GTFD;
- break;
- case ELINK_SPEED_20000:
- vars->link_status = ELINK_LINK_20GTFD;
- break;
- default:
- PMD_DRV_LOG(DEBUG, "Invalid line speed %d for XMAC",
- vars->line_speed);
- return ELINK_STATUS_ERROR;
- }
- vars->link_status |= LINK_STATUS_LINK_UP;
- if (params->loopback_mode == ELINK_LOOPBACK_XMAC)
- elink_xmac_enable(params, vars, 1);
- else
- elink_xmac_enable(params, vars, 0);
- }
- return ELINK_STATUS_OK;
-}
-
-static elink_status_t elink_init_emul(struct elink_params *params,
- struct elink_vars *vars)
-{
- struct bnx2x_softc *sc = params->sc;
- if (CHIP_IS_E3(sc)) {
- if (elink_init_e3_emul_mac(params, vars) != ELINK_STATUS_OK)
- return ELINK_STATUS_ERROR;
- } else {
- if (params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC) {
- vars->line_speed = ELINK_SPEED_1000;
- vars->link_status = (LINK_STATUS_LINK_UP |
- ELINK_LINK_1000XFD);
- if (params->loopback_mode == ELINK_LOOPBACK_EMAC)
- elink_emac_enable(params, vars, 1);
- else
- elink_emac_enable(params, vars, 0);
- } else {
- vars->line_speed = ELINK_SPEED_10000;
- vars->link_status = (LINK_STATUS_LINK_UP |
- ELINK_LINK_10GTFD);
- if (params->loopback_mode == ELINK_LOOPBACK_BMAC)
- elink_bmac_enable(params, vars, 1, 1);
- else
- elink_bmac_enable(params, vars, 0, 1);
- }
- }
- vars->link_up = 1;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
-
- if (CHIP_IS_E1x(sc))
- elink_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* Disable drain */
- REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
-
- /* update shared memory */
- elink_update_mng(params, vars->link_status);
- return ELINK_STATUS_OK;
-}
-#endif
-#ifdef ELINK_INCLUDE_FPGA
-static elink_status_t elink_init_fpga(struct elink_params *params,
- struct elink_vars *vars)
-{
- /* Enable on E1.5 FPGA */
- struct bnx2x_softc *sc = params->sc;
- vars->duplex = DUPLEX_FULL;
- vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
- vars->flow_ctrl = (ELINK_FLOW_CTRL_TX | ELINK_FLOW_CTRL_RX);
- vars->link_status |= (LINK_STATUS_TX_FLOW_CONTROL_ENABLED |
- LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
- if (CHIP_IS_E3(sc)) {
- vars->line_speed = params->req_line_speed[0];
- switch (vars->line_speed) {
- case ELINK_SPEED_AUTO_NEG:
- vars->line_speed = ELINK_SPEED_2500;
- case ELINK_SPEED_2500:
- vars->link_status = ELINK_LINK_2500TFD;
- break;
- case ELINK_SPEED_1000:
- vars->link_status = ELINK_LINK_1000XFD;
- break;
- case ELINK_SPEED_100:
- vars->link_status = ELINK_LINK_100TXFD;
- break;
- case ELINK_SPEED_10:
- vars->link_status = ELINK_LINK_10TFD;
- break;
- default:
- PMD_DRV_LOG(DEBUG, "Invalid link speed %d",
- params->req_line_speed[0]);
- return ELINK_STATUS_ERROR;
- }
- vars->link_status |= LINK_STATUS_LINK_UP;
- if (params->loopback_mode == ELINK_LOOPBACK_UMAC)
- elink_umac_enable(params, vars, 1);
- else
- elink_umac_enable(params, vars, 0);
- } else {
- vars->line_speed = ELINK_SPEED_10000;
- vars->link_status = (LINK_STATUS_LINK_UP | ELINK_LINK_10GTFD);
- if (params->loopback_mode == ELINK_LOOPBACK_EMAC)
- elink_emac_enable(params, vars, 1);
- else
- elink_emac_enable(params, vars, 0);
- }
- vars->link_up = 1;
-
- if (CHIP_IS_E1x(sc))
- elink_pbf_update(params, vars->flow_ctrl, vars->line_speed);
- /* Disable drain */
- REG_WR(sc, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
-
- /* Update shared memory */
- elink_update_mng(params, vars->link_status);
- return ELINK_STATUS_OK;
-}
-#endif
static void elink_init_bmac_loopback(struct elink_params *params,
struct elink_vars *vars)
{
@@ -12236,12 +12002,8 @@ elink_status_t elink_phy_init(struct elink_params *params,
ELINK_NIG_MASK_XGXS0_LINK10G |
ELINK_NIG_MASK_SERDES0_LINK_STATUS |
ELINK_NIG_MASK_MI_INT));
-#ifdef ELINK_INCLUDE_EMUL
- if (!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC))
-#endif
- elink_emac_init(params);
+ elink_emac_init(params);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
vars->link_status |= LINK_STATUS_PFC_ENABLED;
@@ -12253,45 +12015,36 @@ elink_status_t elink_phy_init(struct elink_params *params,
set_phy_vars(params, vars);
PMD_DRV_LOG(DEBUG, "Num of phys on board: %d", params->num_phys);
-#ifdef ELINK_INCLUDE_FPGA
- if (CHIP_REV_IS_FPGA(sc)) {
- return elink_init_fpga(params, vars);
- } else
-#endif
-#ifdef ELINK_INCLUDE_EMUL
- if (CHIP_REV_IS_EMUL(sc)) {
- return elink_init_emul(params, vars);
- } else
-#endif
- switch (params->loopback_mode) {
- case ELINK_LOOPBACK_BMAC:
- elink_init_bmac_loopback(params, vars);
- break;
- case ELINK_LOOPBACK_EMAC:
- elink_init_emac_loopback(params, vars);
- break;
- case ELINK_LOOPBACK_XMAC:
- elink_init_xmac_loopback(params, vars);
- break;
- case ELINK_LOOPBACK_UMAC:
- elink_init_umac_loopback(params, vars);
- break;
- case ELINK_LOOPBACK_XGXS:
- case ELINK_LOOPBACK_EXT_PHY:
- elink_init_xgxs_loopback(params, vars);
- break;
- default:
- if (!CHIP_IS_E3(sc)) {
- if (params->switch_cfg == ELINK_SWITCH_CFG_10G)
- elink_xgxs_deassert(params);
- else
- elink_serdes_deassert(sc, params->port);
- }
- elink_link_initialize(params, vars);
- DELAY(1000 * 30);
- elink_link_int_enable(params);
- break;
+
+ switch (params->loopback_mode) {
+ case ELINK_LOOPBACK_BMAC:
+ elink_init_bmac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_EMAC:
+ elink_init_emac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_XMAC:
+ elink_init_xmac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_UMAC:
+ elink_init_umac_loopback(params, vars);
+ break;
+ case ELINK_LOOPBACK_XGXS:
+ case ELINK_LOOPBACK_EXT_PHY:
+ elink_init_xgxs_loopback(params, vars);
+ break;
+ default:
+ if (!CHIP_IS_E3(sc)) {
+ if (params->switch_cfg == ELINK_SWITCH_CFG_10G)
+ elink_xgxs_deassert(params);
+ else
+ elink_serdes_deassert(sc, params->port);
}
+ elink_link_initialize(params, vars);
+ DELAY(1000 * 30);
+ elink_link_int_enable(params);
+ break;
+ }
elink_update_mng(params, vars->link_status);
elink_update_mng_eee(params, vars->eee_status);
@@ -12325,22 +12078,12 @@ static elink_status_t elink_link_reset(struct elink_params *params,
REG_WR(sc, NIG_REG_BMAC0_OUT_EN + port * 4, 0);
REG_WR(sc, NIG_REG_EGRESS_EMAC0_OUT_EN + port * 4, 0);
}
-#ifdef ELINK_INCLUDE_EMUL
- /* Stop BigMac rx */
- if (!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC))
-#endif
- if (!CHIP_IS_E3(sc))
- elink_set_bmac_rx(sc, port, 0);
-#ifdef ELINK_INCLUDE_EMUL
- /* Stop XMAC/UMAC rx */
- if (!(params->feature_config_flags &
- ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC))
-#endif
- if (CHIP_IS_E3(sc) && !CHIP_REV_IS_FPGA(sc)) {
- elink_set_xmac_rxtx(params, 0);
- elink_set_umac_rxtx(params, 0);
- }
+ if (!CHIP_IS_E3(sc))
+ elink_set_bmac_rx(sc, port, 0);
+ if (CHIP_IS_E3(sc) && !CHIP_REV_IS_FPGA(sc)) {
+ elink_set_xmac_rxtx(params, 0);
+ elink_set_umac_rxtx(params, 0);
+ }
/* Disable emac */
if (!CHIP_IS_E3(sc))
REG_WR(sc, NIG_REG_NIG_EMAC0_EN + port * 4, 0);
@@ -12376,14 +12119,11 @@ static elink_status_t elink_link_reset(struct elink_params *params,
elink_bits_dis(sc, NIG_REG_LATCH_BC_0 + port * 4,
1 << ELINK_NIG_LATCH_BC_ENABLE_MI_INT);
}
-#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
- if (!CHIP_REV_IS_SLOW(sc))
-#endif
- if (params->phy[ELINK_INT_PHY].link_reset)
- params->phy[ELINK_INT_PHY].link_reset(&params->
- phy
- [ELINK_INT_PHY],
- params);
+ if (params->phy[ELINK_INT_PHY].link_reset)
+ params->phy[ELINK_INT_PHY].link_reset(&params->
+ phy
+ [ELINK_INT_PHY],
+ params);
/* Disable nig ingress interface */
if (!CHIP_IS_E3(sc)) {
@@ -12868,10 +12608,6 @@ elink_status_t elink_common_init_phy(struct bnx2x_softc * sc,
uint32_t phy_ver, val;
uint8_t phy_index = 0;
uint32_t ext_phy_type, ext_phy_config;
-#if defined(ELINK_INCLUDE_EMUL) || defined(ELINK_INCLUDE_FPGA)
- if (CHIP_REV_IS_EMUL(sc) || CHIP_REV_IS_FPGA(sc))
- return ELINK_STATUS_OK;
-#endif
elink_set_mdio_clk(sc, GRCBASE_EMAC0);
elink_set_mdio_clk(sc, GRCBASE_EMAC1);
diff --git a/drivers/net/bnx2x/elink.h b/drivers/net/bnx2x/elink.h
index c4f886a7..9401b7cd 100644
--- a/drivers/net/bnx2x/elink.h
+++ b/drivers/net/bnx2x/elink.h
@@ -359,10 +359,6 @@ struct elink_params {
#define ELINK_FEATURE_CONFIG_PFC_ENABLED (1<<1)
#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
-#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC (1<<4)
-#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC (1<<5)
-#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC (1<<6)
-#define ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC (1<<7)
#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
#define ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
#define ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index d9c5a4cb..65aaa929 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -59,6 +59,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_stats.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c
SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c
+SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
#
# Export include files
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 0e21aceb..4418c7fd 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -63,6 +63,7 @@ struct bnxt_vf_info {
uint16_t max_rx_rings;
uint16_t max_l2_ctx;
uint16_t max_vnics;
+ uint16_t vlan;
struct bnxt_pf_info *pf;
};
@@ -94,7 +95,7 @@ struct bnxt_pf_info {
#define BNXT_LINK_WAIT_CNT 10
#define BNXT_LINK_WAIT_INTERVAL 100
struct bnxt_link_info {
- uint8_t phy_flags;
+ uint32_t phy_flags;
uint8_t mac_type;
uint8_t phy_link_status;
uint8_t loop_back;
@@ -130,6 +131,8 @@ struct bnxt {
#define BNXT_FLAG_VF (1 << 1)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type)
+#define BNXT_NPAR_PF(bp) (BNXT_PF(bp) && BNXT_NPAR_ENABLED(bp))
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
@@ -156,6 +159,8 @@ struct bnxt {
#define MAX_FF_POOLS ETH_64_POOLS
STAILQ_HEAD(, bnxt_vnic_info) ff_pool[MAX_FF_POOLS];
+ struct bnxt_irq *irq_tbl;
+
#define MAX_NUM_MAC_ADDR 32
uint8_t mac_addr[ETHER_ADDR_LEN];
@@ -171,7 +176,10 @@ struct bnxt {
struct bnxt_pf_info pf;
struct bnxt_vf_info vf;
+ uint8_t port_partition_type;
uint8_t dev_stopped;
};
+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
+
#endif
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 60c277a1..3aedcb8d 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -42,28 +42,23 @@
/*
* Async event handling
*/
-void bnxt_handle_async_event(struct bnxt *bp __rte_unused,
+void bnxt_handle_async_event(struct bnxt *bp,
struct cmpl_base *cmp)
{
struct hwrm_async_event_cmpl *async_cmp =
(struct hwrm_async_event_cmpl *)cmp;
+ uint16_t event_id = rte_le_to_cpu_16(async_cmp->event_id);
/* TODO: HWRM async events are not defined yet */
/* Needs to handle: link events, error events, etc. */
- switch (async_cmp->event_id) {
- case 0:
- /* Assume LINK_CHANGE == 0 */
- RTE_LOG(INFO, PMD, "Link change event\n");
-
- /* Can just prompt the update_op routine to do a qcfg
- * instead of doing the actual qcfg
- */
- break;
- case 1:
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
+ bnxt_link_update_op(bp->eth_dev, 0);
break;
default:
- RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n",
- async_cmp->event_id);
+ RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@@ -124,6 +119,9 @@ void bnxt_free_def_cp_ring(struct bnxt *bp)
{
struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ if (cpr == NULL)
+ return;
+
bnxt_free_ring(cpr->cp_ring_struct);
rte_free(cpr->cp_ring_struct);
rte_free(cpr);
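
Editor's note: the bnxt_cpr.c hunk above converts the completion's event_id from little-endian before dispatching and routes the three HWRM link-change events to bnxt_link_update_op(). The standalone sketch below shows that dispatch shape; le16_to_cpu(), the EVT_* values and the wire-byte encoding are stand-ins for rte_le_to_cpu_16() and the real HWRM_ASYNC_EVENT_CMPL_EVENT_ID_* constants.

/* async_event_sketch.c - dispatching on a little-endian event id */
#include <stdint.h>
#include <stdio.h>

#define EVT_LINK_STATUS_CHANGE 0x0000u
#define EVT_LINK_SPEED_CHANGE  0x0001u

/* assemble a host-order id from a little-endian wire field,
 * a portable stand-in for rte_le_to_cpu_16() */
static uint16_t le16_to_cpu(const uint8_t wire[2])
{
    return (uint16_t)(wire[0] | ((uint16_t)wire[1] << 8));
}

static void handle_async_event(const uint8_t wire_id[2])
{
    uint16_t id = le16_to_cpu(wire_id);

    switch (id) {
    case EVT_LINK_STATUS_CHANGE:
    case EVT_LINK_SPEED_CHANGE:
        puts("link changed: refresh and report the link state");
        break;
    default:
        printf("unhandled async event 0x%x\n", id);
        break;
    }
}

int main(void)
{
    handle_async_event((const uint8_t[2]){0x00, 0x00});   /* link status change */
    handle_async_event((const uint8_t[2]){0x34, 0x12});   /* unknown id 0x1234 */
    return 0;
}
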
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index c176f8c5..f9f2adb4 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -54,6 +54,7 @@
RING_CMP(cpr->cp_ring_struct, raw_cons)))
#define B_CP_DIS_DB(cpr, raw_cons) \
+ rte_smp_wmb(); \
(*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
RING_CMP(cpr->cp_ring_struct, raw_cons)))
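
Editor's note: the one-line bnxt_cpr.h change above inserts rte_smp_wmb() into B_CP_DIS_DB so the completion-ring updates are visible before the doorbell write that tells the hardware to look at them. The sketch below shows that ordering with the C11 release fence standing in for rte_smp_wmb(); ring[], doorbell and post_work() are invented for the sketch.

/* doorbell_sketch.c - write barrier before a doorbell update, simplified */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t ring[16];   /* descriptors the device will read */
static volatile uint32_t doorbell;   /* register telling it where to look */

static void post_work(uint32_t idx, uint32_t desc)
{
    ring[idx] = desc;                            /* 1. publish the descriptor */
    atomic_thread_fence(memory_order_release);   /* 2. order it before...     */
    doorbell = idx;                              /* 3. ...the doorbell write  */
}

int main(void)
{
    post_work(0, 0xabcd);
    printf("ring[0]=0x%x doorbell=%u\n", ring[0], doorbell);
    return 0;
}
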
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 7052ecf3..035fe07a 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -43,6 +43,7 @@
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
+#include "bnxt_irq.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
@@ -62,24 +63,38 @@ static const char bnxt_version[] =
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_NS2 0x16cd
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
-#define BROADCOM_DEV_ID_57406_MF 0x16d4
-#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57402_MF 0x16d4
+#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57404_MF 0x16e7
+#define BROADCOM_DEV_ID_57406_MF 0x16e8
+#define BROADCOM_DEV_ID_57407_SFP 0x16e9
+#define BROADCOM_DEV_ID_57407_MF 0x16ea
static struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_NS2) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404_MF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) },
- { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -145,6 +160,7 @@ alloc_mem_err:
static int bnxt_init_chip(struct bnxt *bp)
{
unsigned int i, rss_idx, fw_idx;
+ struct rte_eth_link new;
int rc;
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
@@ -231,6 +247,21 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
+ rc = bnxt_get_hwrm_link_config(bp, &new);
+ if (rc) {
+ RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
+ goto err_out;
+ }
+
+ if (!bp->link_info.link_up) {
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ if (rc) {
+ RTE_LOG(ERR, PMD,
+ "HWRM link config failure rc: %x\n", rc);
+ goto err_out;
+ }
+ }
+
return 0;
err_out:
@@ -322,6 +353,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
ETH_TXQ_FLAGS_NOOFFLOADS,
};
+ eth_dev->data->dev_conf.intr_conf.lsc = 1;
+
/* *INDENT-ON* */
/*
@@ -360,7 +393,6 @@ found:
static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
- int rc;
bp->rx_queues = (void *)eth_dev->data->rx_queues;
bp->tx_queues = (void *)eth_dev->data->tx_queues;
@@ -375,8 +407,42 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE;
- rc = bnxt_set_hwrm_link_config(bp, true);
- return rc;
+ return 0;
+}
+
+static inline int
+rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &eth_dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return 1;
+
+ return 0;
+}
+
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_link *link = &eth_dev->data->dev_link;
+
+ if (link->link_status)
+ RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+ (uint8_t)(eth_dev->data->port_id),
+ (uint32_t)link->link_speed,
+ (link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ RTE_LOG(INFO, PMD, "Port %d Link Down\n",
+ (uint8_t)(eth_dev->data->port_id));
+}
+
+static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev)
+{
+ bnxt_print_link_info(eth_dev);
+ return 0;
}
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
@@ -392,18 +458,31 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
goto error;
}
+ rc = bnxt_setup_int(bp);
+ if (rc)
+ goto error;
+
rc = bnxt_alloc_mem(bp);
if (rc)
goto error;
+ rc = bnxt_request_int(bp);
+ if (rc)
+ goto error;
+
rc = bnxt_init_nic(bp);
if (rc)
goto error;
+ bnxt_enable_int(bp);
+
+ bnxt_link_update_op(eth_dev, 0);
return 0;
error:
bnxt_shutdown_nic(bp);
+ bnxt_disable_int(bp);
+ bnxt_free_int(bp);
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
bnxt_free_mem(bp);
@@ -437,7 +516,11 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
/* TBD: STOP HW queues DMA */
eth_dev->data->dev_link.link_status = 0;
}
+ bnxt_set_hwrm_link_config(bp, false);
+ bnxt_disable_int(bp);
+ bnxt_free_int(bp);
bnxt_shutdown_nic(bp);
+ bp->dev_stopped = 1;
}
static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
@@ -450,7 +533,14 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
bnxt_free_tx_mbufs(bp);
bnxt_free_rx_mbufs(bp);
bnxt_free_mem(bp);
- rte_free(eth_dev->data->mac_addrs);
+ if (eth_dev->data->mac_addrs != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+ if (bp->grp_info != NULL) {
+ rte_free(bp->grp_info);
+ bp->grp_info = NULL;
+ }
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
@@ -499,6 +589,11 @@ static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
struct bnxt_filter_info *filter;
+ if (BNXT_VF(bp)) {
+ RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+ return;
+ }
+
if (!vnic) {
RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
return;
@@ -522,8 +617,7 @@ static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
bnxt_hwrm_set_filter(bp, vnic, filter);
}
-static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
- int wait_to_complete)
+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
int rc = 0;
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
@@ -541,21 +635,20 @@ static int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
"Failed to retrieve link rc = 0x%x!", rc);
goto out;
}
- if (!wait_to_complete)
- break;
-
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
+ if (!wait_to_complete)
+ break;
} while (!new.link_status && cnt--);
+out:
/* Timed out or success */
- if (new.link_status) {
- /* Update only if success */
- eth_dev->data->dev_link.link_duplex = new.link_duplex;
- eth_dev->data->dev_link.link_speed = new.link_speed;
+ if (new.link_status != eth_dev->data->dev_link.link_status ||
+ new.link_speed != eth_dev->data->dev_link.link_speed) {
+ rte_bnxt_atomic_write_link_status(eth_dev, &new);
+ bnxt_print_link_info(eth_dev);
}
- eth_dev->data->dev_link.link_status = new.link_status;
-out:
+
return rc;
}
@@ -666,6 +759,11 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
/* EW - need to revisit here copying from u64 to u16 */
memcpy(reta_conf, vnic->rss_table, reta_size);
+ if (rte_intr_allow_others(&eth_dev->pci_dev->intr_handle)) {
+ if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
+ bnxt_dev_lsc_intr_setup(eth_dev);
+ }
+
return 0;
}
@@ -817,6 +915,11 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
+ RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+ return -ENOTSUP;
+ }
+
switch (fc_conf->mode) {
case RTE_FC_NONE:
bp->link_info.auto_pause = 0;
@@ -897,7 +1000,9 @@ static struct eth_dev_ops bnxt_dev_ops = {
static bool bnxt_vf_pciid(uint16_t id)
{
if (id == BROADCOM_DEV_ID_57304_VF ||
- id == BROADCOM_DEV_ID_57406_VF)
+ id == BROADCOM_DEV_ID_57406_VF ||
+ id == BROADCOM_DEV_ID_5731X_VF ||
+ id == BROADCOM_DEV_ID_5741X_VF)
return true;
return false;
}
@@ -945,14 +1050,6 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (version_printed++ == 0)
RTE_LOG(INFO, PMD, "%s", bnxt_version);
- if (eth_dev->pci_dev->addr.function >= 2 &&
- eth_dev->pci_dev->addr.function < 4) {
- RTE_LOG(ERR, PMD, "Function not enabled %x:\n",
- eth_dev->pci_dev->addr.function);
- rc = -ENOMEM;
- goto error;
- }
-
rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
bp = eth_dev->data->dev_private;
@@ -980,6 +1077,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
goto error_free;
bnxt_hwrm_queue_qportcfg(bp);
+ bnxt_hwrm_func_qcfg(bp);
+
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
@@ -1040,39 +1139,37 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
struct bnxt *bp = eth_dev->data->dev_private;
int rc;
- if (eth_dev->data->mac_addrs)
+ if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
- if (bp->grp_info)
+ eth_dev->data->mac_addrs = NULL;
+ }
+ if (bp->grp_info != NULL) {
rte_free(bp->grp_info);
+ bp->grp_info = NULL;
+ }
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bnxt_free_hwrm_resources(bp);
if (bp->dev_stopped == 0)
bnxt_dev_close_op(eth_dev);
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
return rc;
}
static struct eth_driver bnxt_rte_pmd = {
.pci_drv = {
- .name = "rte_" DRV_MODULE_NAME "_pmd",
.id_table = bnxt_pci_id_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_DETACHABLE | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove
},
.eth_dev_init = bnxt_dev_init,
.eth_dev_uninit = bnxt_dev_uninit,
.dev_private_size = sizeof(struct bnxt),
};
-static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused)
-{
- RTE_LOG(INFO, PMD, "bnxt_rte_pmd_init() called for %s\n", name);
- rte_eth_driver_register(&bnxt_rte_pmd);
- return 0;
-}
-
-static struct rte_driver bnxt_pmd_drv = {
- .type = PMD_PDEV,
- .init = bnxt_rte_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(bnxt_pmd_drv, bnxt);
-DRIVER_REGISTER_PCI_TABLE(bnxt, bnxt_pci_id_map);
+RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
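
Besides the new registration macros, the driver now sets RTE_PCI_DRV_DETACHABLE, so a bnxt port can be hot-unplugged through the standard 16.11 ethdev calls. A sketch of the application side (the helper name and flow are hypothetical, modelled on the usual testpmd-style teardown):

#include <stdio.h>
#include <rte_ethdev.h>

/* Stop, close and detach one port that was probed by a detachable PMD. */
static void
example_detach_port(uint8_t port_id)
{
	char devname[RTE_ETH_NAME_MAX_LEN];

	rte_eth_dev_stop(port_id);
	rte_eth_dev_close(port_id);
	if (rte_eth_dev_detach(port_id, devname) == 0)
		printf("detached %s\n", devname);
}
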
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index f03a1dc8..df1042cf 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -118,6 +118,9 @@ void bnxt_free_filter_mem(struct bnxt *bp)
uint16_t max_filters, i;
int rc = 0;
+ if (bp->filter_info == NULL)
+ return;
+
/* Ensure that all filters are freed */
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 2ed4c2f1..07e71241 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -288,7 +288,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = rte_le_to_cpu_32(resp->fid);
pf->port_id = resp->port_id;
- memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
+ memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
@@ -301,7 +301,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = rte_le_to_cpu_32(resp->fid);
- memcpy(vf->mac_addr, &resp->perm_mac_address, ETHER_ADDR_LEN);
+ memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
@@ -342,13 +342,16 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
req.flags = flags;
- req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
+ req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
req.ver_maj = RTE_VER_YEAR;
req.ver_min = RTE_VER_MONTH;
req.ver_upd = RTE_VER_MINOR;
memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
+ req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
+
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
@@ -386,6 +389,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
+ RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+ HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
my_version = HWRM_VERSION_MAJOR << 16;
my_version |= HWRM_VERSION_MINOR << 8;
@@ -468,49 +473,44 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
int rc = 0;
- struct hwrm_port_phy_cfg_input req = {.req_type = 0};
+ struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint32_t enables = 0;
HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
- req.flags = conf->phy_flags;
if (conf->link_up) {
- req.force_link_speed = conf->link_speed;
+ req.flags = rte_cpu_to_le_32(conf->phy_flags);
+ req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
/*
* Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
* any auto mode, even "none".
*/
- if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
- req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
- } else {
- req.auto_mode = conf->auto_mode;
- req.enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ if (!conf->link_speed) {
+ req.auto_mode |= conf->auto_mode;
+ enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
req.auto_link_speed_mask = conf->auto_link_speed_mask;
- req.enables |=
+ enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
- req.auto_link_speed = conf->auto_link_speed;
- req.enables |=
+ req.auto_link_speed = bp->link_info.auto_link_speed;
+ enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
}
req.auto_duplex = conf->duplex;
- req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
req.auto_pause = conf->auto_pause;
+ req.force_pause = conf->force_pause;
/* Set force_pause if there is no auto or if there is a force */
- if (req.auto_pause)
- req.enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
+ if (req.auto_pause && !req.force_pause)
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
else
- req.enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
- req.force_pause = conf->force_pause;
- if (req.force_pause)
- req.enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
+
+ req.enables = rte_cpu_to_le_32(enables);
} else {
- req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
- req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
- req.force_link_speed = 0;
+ req.flags =
+ rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
+ RTE_LOG(INFO, PMD, "Force Link Down\n");
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -524,7 +524,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
struct bnxt_link_info *link_info)
{
int rc = 0;
- struct hwrm_port_phy_qcfg_input req = {.req_type = 0};
+ struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
@@ -534,7 +534,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
- if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
+ if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
link_info->link_up = 1;
link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
} else {
@@ -606,6 +606,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
req.queue_id = bp->cos_queue[0].id;
+ /* FALLTHROUGH */
case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
req.ring_type = ring_type;
req.cmpl_ring_id =
@@ -809,7 +810,7 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc = 0, i, j;
- struct hwrm_vnic_alloc_input req = {.req_type = 0 };
+ struct hwrm_vnic_alloc_input req = { 0 };
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
@@ -1250,42 +1251,42 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
uint16_t eth_link_speed = 0;
- if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
+ if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
return ETH_LINK_SPEED_AUTONEG;
switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
case ETH_LINK_SPEED_100M:
case ETH_LINK_SPEED_100M_HD:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB;
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
break;
case ETH_LINK_SPEED_1G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
break;
case ETH_LINK_SPEED_2_5G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
break;
case ETH_LINK_SPEED_10G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
break;
case ETH_LINK_SPEED_20G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
break;
case ETH_LINK_SPEED_25G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
break;
case ETH_LINK_SPEED_40G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
break;
case ETH_LINK_SPEED_50G:
eth_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
default:
RTE_LOG(ERR, PMD,
@@ -1452,39 +1453,80 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
struct bnxt_link_info link_req;
uint16_t speed;
+ if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+ return 0;
+
rc = bnxt_valid_link_speed(dev_conf->link_speeds,
bp->eth_dev->data->port_id);
if (rc)
goto error;
memset(&link_req, 0, sizeof(link_req));
- speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.link_up = link_up;
+ if (!link_up)
+ goto port_phy_cfg;
+
+ speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
+ link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
if (speed == 0) {
- link_req.phy_flags =
+ link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_mode =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
- link_req.auto_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
} else {
- link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
- HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
+ link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
+ RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info.auto_pause;
link_req.force_pause = bp->link_info.force_pause;
+port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
RTE_LOG(ERR, PMD,
"Set link config failed with rc %d\n", rc);
}
+ rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
return rc;
}
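
Whether the autoneg branch or the forced branch above is taken follows directly from the port's configured link_speeds. A hypothetical application fragment (names and values are illustrative only) that drives the forced path by requesting a fixed 10G link:

#include <rte_ethdev.h>

static int
example_force_10g(uint8_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf conf = {
		/* ETH_LINK_SPEED_FIXED makes bnxt_parse_eth_link_speed()
		 * return a non-zero speed, selecting the FORCE phy_cfg path. */
		.link_speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_FIXED,
		.rxmode = {
			.max_rx_pkt_len = ETHER_MAX_LEN,
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}
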
+
+/* JIRA 22088 */
+int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc = 0;
+
+ HWRM_PREP(req, FUNC_QCFG, -1, resp);
+ req.fid = rte_cpu_to_le_16(0xffff);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ if (BNXT_VF(bp)) {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+	/* Hard-coded 0xfff VLAN ID mask */
+ vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+ }
+
+ switch (resp->port_partition_type) {
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
+ bp->port_partition_type = resp->port_partition_type;
+ break;
+ default:
+ bp->port_partition_type = 0;
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index a508024f..6519ef21 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -100,5 +100,6 @@ void bnxt_free_hwrm_resources(struct bnxt *bp);
int bnxt_alloc_hwrm_resources(struct bnxt *bp);
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
+int bnxt_hwrm_func_qcfg(struct bnxt *bp);
#endif
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
new file mode 100644
index 00000000..e93585a0
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -0,0 +1,156 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Broadcom Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+
+#include <rte_malloc.h>
+
+#include "bnxt.h"
+#include "bnxt_cpr.h"
+#include "bnxt_irq.h"
+#include "bnxt_ring.h"
+#include "hsi_struct_def_dpdk.h"
+
+/*
+ * Interrupts
+ */
+
+static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused,
+ void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+ uint32_t raw_cons = cpr->cp_raw_cons;
+ uint32_t cons;
+ struct cmpl_base *cmp;
+
+ while (1) {
+ cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+ cmp = &cpr->cp_desc_ring[cons];
+
+ if (!CMP_VALID(cmp, raw_cons, cpr->cp_ring_struct))
+ break;
+
+ switch (CMP_TYPE(cmp)) {
+ case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
+ /* Handle any async event */
+ bnxt_handle_async_event(bp, cmp);
+ break;
+ case CMPL_BASE_TYPE_HWRM_FWD_RESP:
+ /* Handle HWRM forwarded responses */
+ bnxt_handle_fwd_req(bp, cmp);
+ break;
+ default:
+ /* Ignore any other events */
+ break;
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ }
+ B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+}
+
+void bnxt_free_int(struct bnxt *bp)
+{
+ struct bnxt_irq *irq;
+
+ irq = bp->irq_tbl;
+ if (irq) {
+ if (irq->requested) {
+ rte_intr_disable(&bp->pdev->intr_handle);
+ rte_intr_callback_unregister(&bp->pdev->intr_handle,
+ irq->handler,
+ (void *)bp->eth_dev);
+ irq->requested = 0;
+ }
+ rte_free((void *)bp->irq_tbl);
+ bp->irq_tbl = NULL;
+ }
+}
+
+void bnxt_disable_int(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ /* Only the default completion ring */
+ B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+}
+
+void bnxt_enable_int(struct bnxt *bp)
+{
+ struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
+
+ B_CP_DB_REARM(cpr, cpr->cp_raw_cons);
+}
+
+int bnxt_setup_int(struct bnxt *bp)
+{
+ uint16_t total_vecs;
+ const int len = sizeof(bp->irq_tbl[0].name);
+ int i, rc = 0;
+
+ /* DPDK host only supports 1 MSI-X vector */
+ total_vecs = 1;
+ bp->irq_tbl = rte_calloc("bnxt_irq_tbl", total_vecs,
+ sizeof(struct bnxt_irq), 0);
+ if (bp->irq_tbl) {
+ for (i = 0; i < total_vecs; i++) {
+ bp->irq_tbl[i].vector = i;
+ snprintf(bp->irq_tbl[i].name, len,
+ "%s-%d", bp->eth_dev->data->name, i);
+ bp->irq_tbl[i].handler = bnxt_int_handler;
+ }
+ } else {
+ rc = -ENOMEM;
+ goto setup_exit;
+ }
+ return 0;
+
+setup_exit:
+	RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+ return rc;
+}
+
+int bnxt_request_int(struct bnxt *bp)
+{
+ int rc = 0;
+
+ struct bnxt_irq *irq = bp->irq_tbl;
+
+ rte_intr_callback_register(&bp->pdev->intr_handle, irq->handler,
+ (void *)bp->eth_dev);
+ rte_intr_enable(&bp->pdev->intr_handle);
+
+ irq->requested = 1;
+ return rc;
+}
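
Taken together, these helpers are invoked in a fixed order from the start/stop paths changed earlier in this patch. A sketch of that call order (mirroring bnxt_dev_start_op()/bnxt_dev_stop_op(); error handling trimmed, the wrapper function itself is hypothetical):

#include "bnxt.h"
#include "bnxt_irq.h"

static int
example_irq_lifecycle(struct bnxt *bp)
{
	int rc;

	rc = bnxt_setup_int(bp);	/* allocate and name the irq table    */
	if (rc)
		return rc;
	rc = bnxt_request_int(bp);	/* register callback, enable the IRQ  */
	if (rc)
		return rc;
	bnxt_enable_int(bp);		/* re-arm the default completion ring */

	/* ... bnxt_int_handler() now drains async events and forwarded
	 * HWRM requests off the default completion ring ... */

	bnxt_disable_int(bp);		/* mask the default completion ring   */
	bnxt_free_int(bp);		/* unregister callback, free the table */
	return 0;
}
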
diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h
new file mode 100644
index 00000000..e21bec56
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_irq.h
@@ -0,0 +1,51 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Broadcom Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_IRQ_H_
+#define _BNXT_IRQ_H_
+
+struct bnxt_irq {
+ rte_intr_callback_fn handler;
+ unsigned int vector;
+ uint8_t requested;
+ char name[RTE_ETH_NAME_MAX_LEN + 2];
+};
+
+struct bnxt;
+void bnxt_free_int(struct bnxt *bp);
+void bnxt_disable_int(struct bnxt *bp);
+void bnxt_enable_int(struct bnxt *bp);
+int bnxt_setup_int(struct bnxt *bp);
+int bnxt_request_int(struct bnxt *bp);
+
+#endif
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index 6f1c7602..40c9cac1 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -103,7 +103,7 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_stats->ibytes += bnxt_stats->q_ibytes[i];
bnxt_stats->imissed += bnxt_stats->q_errors[i];
bnxt_stats->ierrors +=
- rte_le_to_cpu_64(hw_stats->rx_err_pkts);
+ rte_le_to_cpu_64(hw_stats->rx_discard_pkts);
}
for (i = 0; i < bp->tx_cp_nr_rings; i++) {
@@ -130,7 +130,7 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
bnxt_stats->opackets += bnxt_stats->q_opackets[i];
bnxt_stats->obytes += bnxt_stats->q_obytes[i];
bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts);
- bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_err_pkts);
+ bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_discard_pkts);
}
}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2176acaa..4c16101b 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -38,6 +38,7 @@
#define BNXT_TX_PUSH_THRESH 92
#define B_TX_DB(db, prod) \
+ rte_smp_wmb(); \
(*(uint32_t *)db = (DB_KEY_TX | prod))
struct bnxt_tx_ring_info {
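
With the added rte_smp_wmb(), B_TX_DB() now expands to two statements, so the barrier and the doorbell write could be split apart if the macro were ever used under an unbraced if/else. A defensive variant (a sketch only, not what the patch applies) would keep them together with the usual do/while(0) idiom:

#define B_TX_DB(db, prod)	do {					\
	/* make the BD writes visible before ringing the doorbell */	\
	rte_smp_wmb();							\
	*(uint32_t *)(db) = (DB_KEY_TX | (prod));			\
} while (0)
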
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 1b5f54c4..33fdde2f 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -175,7 +175,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
struct rte_pci_device *pdev = bp->pdev;
const struct rte_memzone *mz;
char mz_name[RTE_MEMZONE_NAMESIZE];
- uint16_t entry_length = RTE_CACHE_LINE_ROUNDUP(
+ uint32_t entry_length = RTE_CACHE_LINE_ROUNDUP(
HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) +
HW_HASH_KEY_SIZE);
uint16_t max_vnics;
@@ -229,6 +229,9 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
struct bnxt_vnic_info *vnic;
uint16_t max_vnics, i;
+ if (bp->vnic_info == NULL)
+ return;
+
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index f2db3eaf..f0248377 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -43,7 +43,7 @@ struct ctx_hw_stats64 {
uint64_t rx_mcast_pkts;
uint64_t rx_bcast_pkts;
uint64_t rx_drop_pkts;
- uint64_t rx_err_pkts;
+ uint64_t rx_discard_pkts;
uint64_t rx_ucast_bytes;
uint64_t rx_mcast_bytes;
uint64_t rx_bcast_bytes;
@@ -52,7 +52,7 @@ struct ctx_hw_stats64 {
uint64_t tx_mcast_pkts;
uint64_t tx_bcast_pkts;
uint64_t tx_drop_pkts;
- uint64_t tx_err_pkts;
+ uint64_t tx_discard_pkts;
uint64_t tx_ucast_bytes;
uint64_t tx_mcast_bytes;
uint64_t tx_bcast_bytes;
@@ -61,12 +61,14 @@ struct ctx_hw_stats64 {
uint64_t tpa_bytes;
uint64_t tpa_events;
uint64_t tpa_aborts;
-} ctx_hw_stats64_t;
+} __attribute__((packed));
-/* HW Resource Manager Specification 1.2.0 */
+/* HW Resource Manager Specification 1.5.1 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 2
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 5
+#define HWRM_VERSION_UPDATE 1
+
+#define HWRM_VERSION_STR "1.5.1"
/*
* Following is the signature for HWRM message field that indicates not
@@ -85,6 +87,7 @@ struct ctx_hw_stats64 {
#define HWRM_VER_GET (UINT32_C(0x0))
#define HWRM_FUNC_RESET (UINT32_C(0x11))
#define HWRM_FUNC_QCAPS (UINT32_C(0x15))
+#define HWRM_FUNC_QCFG (UINT32_C(0x16))
#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a))
#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d))
#define HWRM_PORT_PHY_CFG (UINT32_C(0x20))
@@ -115,1859 +118,1612 @@ struct ctx_hw_stats64 {
/* Short TX BD (16 bytes) */
struct tx_bd_short {
+ uint16_t flags_type;
/*
- * All bits in this field must be valid on the first BD of a packet.
- * Only the packet_end bit must be valid for the remaining BDs of a
- * packet.
+ * All bits in this field must be valid on the first BD of a
+ * packet. Only the packet_end bit must be valid for the
+ * remaining BDs of a packet.
*/
/* This value identifies the type of buffer descriptor. */
- #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_SHORT_TYPE_SFT 0
- /*
- * Indicates that this BD is 16B long and is used for normal L2
- * packet transmission.
- */
- #define TX_BD_SHORT_TYPE_TX_BD_SHORT (UINT32_C(0x0) << 0)
- /*
- * If set to 1, the packet ends with the data in the buffer pointed to
- * by this descriptor. This flag must be valid on every BD.
- */
- #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
- /*
- * If set to 1, the device will not generate a completion for this
- * transmit packet unless there is an error in it's processing. If this
- * bit is set to 0, then the packet will be completed normally. This bit
- * must be valid only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
- /*
- * This value indicates how many 16B BD locations are consumed in the
- * ring by this packet. A value of 1 indicates that this BD is the only
- * BD (and that the it is a short BD). A value of 3 indicates either 3
- * short BDs or 1 long BD and one short BD in the packet. A value of 0
- * indicates that there are 32 BD locations in the packet (the maximum).
- * This field is valid only on the first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It is used
- * by the chip to optimize internal processing. The packet will be
- * dropped if the hint is too short. This field is valid only on the
- * first BD of a packet.
- */
- #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
- /* indicates packet length < 512B */
- #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
- /* indicates 512 <= packet length < 1KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
- /* indicates 1KB <= packet length < 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
- /* indicates packet length >= 2KB */
- #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_SHORT_TYPE_SFT 0
/*
- * If set to 1, the device immediately updates the Send Consumer Index
- * after the buffer associated with this descriptor has been transferred
- * via DMA to NIC memory from host memory. An interrupt may or may not
- * be generated according to the state of the interrupt avoidance
- * mechanisms. If this bit is set to 0, then the Consumer Index is only
- * updated as soon as one of the host interrupt coalescing conditions
- * has been met. This bit must be valid on the first BD of a packet.
+ * Indicates that this BD is 16B long and is
+ * used for normal L2 packet transmission.
*/
- #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
+ #define TX_BD_SHORT_TYPE_TX_BD_SHORT UINT32_C(0x0)
/*
- * All bits in this field must be valid on the first BD of a packet.
- * Only the packet_end bit must be valid for the remaining BDs of a
- * packet.
+ * If set to 1, the packet ends with the data in the buffer
+ * pointed to by this descriptor. This flag must be valid on
+ * every BD.
*/
- #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_SHORT_FLAGS_SFT 6
- uint16_t flags_type;
-
+ #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40)
+ /*
+ * If set to 1, the device will not generate a completion for
+	 * this transmit packet unless there is an error in its
+ * processing. If this bit is set to 0, then the packet will be
+ * completed normally. This bit must be valid only on the first
+ * BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed
+ * in the ring by this packet. A value of 1 indicates that this
+	 * BD is the only BD (and that it is a short BD). A value of
+ * 3 indicates either 3 short BDs or 1 long BD and one short BD
+ * in the packet. A value of 0 indicates that there are 32 BD
+ * locations in the packet (the maximum). This field is valid
+ * only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet. It
+ * is used by the chip to optimize internal processing. The
+ * packet will be dropped if the hint is too short. This field
+ * is valid only on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_SHORT_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer
+ * Index after the buffer associated with this descriptor has
+ * been transferred via DMA to NIC memory from host memory. An
+ * interrupt may or may not be generated according to the state
+ * of the interrupt avoidance mechanisms. If this bit is set to
+ * 0, then the Consumer Index is only updated as soon as one of
+ * the host interrupt coalescing conditions has been met. This
+ * bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000)
/*
- * This is the length of the host physical buffer this BD describes in
- * bytes. This field must be valid on all BDs of a packet.
+ * All bits in this field must be valid on the first BD of a
+ * packet. Only the packet_end bit must be valid for the
+ * remaining BDs of a packet.
*/
+ #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_SHORT_FLAGS_SFT 6
uint16_t len;
/*
- * The opaque data field is pass through to the completion and can be
- * used for any data that the driver wants to associate with the
- * transmit BD. This field must be valid on the first BD of a packet.
+ * This is the length of the host physical buffer this BD
+ * describes in bytes. This field must be valid on all BDs of a
+ * packet.
*/
uint32_t opaque;
-
/*
- * This is the host physical address for the portion of the packet
- * described by this TX BD. This value must be valid on all BDs of a
- * packet.
+	 * The opaque data field is passed through to the completion and
+ * can be used for any data that the driver wants to associate
+ * with the transmit BD. This field must be valid on the first
+ * BD of a packet.
*/
uint64_t addr;
+ /*
+ * This is the host physical address for the portion of the
+ * packet described by this TX BD. This value must be valid on
+ * all BDs of a packet.
+ */
} __attribute__((packed));
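
The field comments above translate directly into how a transmit path fills a single-buffer descriptor. A sketch with hypothetical values (the function name and arguments are illustrative; the length hint assumes the frame is shorter than 512B):

#include <rte_byteorder.h>

static void
example_fill_short_bd(struct tx_bd_short *bd, uint64_t dma_addr,
		      uint16_t pkt_len, uint32_t sw_cookie)
{
	uint16_t flags = TX_BD_SHORT_TYPE_TX_BD_SHORT |
			 TX_BD_SHORT_FLAGS_PACKET_END |
			 (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT) |	/* 1 BD total */
			 TX_BD_SHORT_FLAGS_LHINT_LT512;

	bd->flags_type = rte_cpu_to_le_16(flags);
	bd->len = rte_cpu_to_le_16(pkt_len);
	bd->opaque = sw_cookie;		/* echoed back in the TX completion */
	bd->addr = rte_cpu_to_le_64(dma_addr);
}
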
/* Long TX BD (32 bytes split to 2 16-byte struct) */
struct tx_bd_long {
+ uint16_t flags_type;
/*
- * All bits in this field must be valid on the first BD of a packet.
- * Only the packet_end bit must be valid for the remaining BDs of a
- * packet.
+ * All bits in this field must be valid on the first BD of a
+ * packet. Only the packet_end bit must be valid for the
+ * remaining BDs of a packet.
*/
/* This value identifies the type of buffer descriptor. */
- #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
- #define TX_BD_LONG_TYPE_SFT 0
- /*
- * Indicates that this BD is 32B long and is used for normal L2
- * packet transmission.
- */
- #define TX_BD_LONG_TYPE_TX_BD_LONG (UINT32_C(0x10) << 0)
- /*
- * If set to 1, the packet ends with the data in the buffer pointed to
- * by this descriptor. This flag must be valid on every BD.
- */
- #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
- /*
- * If set to 1, the device will not generate a completion for this
- * transmit packet unless there is an error in it's processing. If this
- * bit is set to 0, then the packet will be completed normally. This bit
- * must be valid only on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
- /*
- * This value indicates how many 16B BD locations are consumed in the
- * ring by this packet. A value of 1 indicates that this BD is the only
- * BD (and that the it is a short BD). A value of 3 indicates either 3
- * short BDs or 1 long BD and one short BD in the packet. A value of 0
- * indicates that there are 32 BD locations in the packet (the maximum).
- * This field is valid only on the first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
- #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
- /*
- * This value is a hint for the length of the entire packet. It is used
- * by the chip to optimize internal processing. The packet will be
- * dropped if the hint is too short. This field is valid only on the
- * first BD of a packet.
- */
- #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
- #define TX_BD_LONG_FLAGS_LHINT_SFT 13
- /* indicates packet length < 512B */
- #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
- /* indicates 512 <= packet length < 1KB */
- #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
- /* indicates 1KB <= packet length < 2KB */
- #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
- /* indicates packet length >= 2KB */
- #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
- #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_LONG_TYPE_SFT 0
/*
- * If set to 1, the device immediately updates the Send Consumer Index
- * after the buffer associated with this descriptor has been transferred
- * via DMA to NIC memory from host memory. An interrupt may or may not
- * be generated according to the state of the interrupt avoidance
- * mechanisms. If this bit is set to 0, then the Consumer Index is only
- * updated as soon as one of the host interrupt coalescing conditions
- * has been met. This bit must be valid on the first BD of a packet.
+ * Indicates that this BD is 32B long and is
+ * used for normal L2 packet transmission.
*/
- #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
+ #define TX_BD_LONG_TYPE_TX_BD_LONG UINT32_C(0x10)
/*
- * All bits in this field must be valid on the first BD of a packet.
- * Only the packet_end bit must be valid for the remaining BDs of a
- * packet.
+ * If set to 1, the packet ends with the data in the buffer
+ * pointed to by this descriptor. This flag must be valid on
+ * every BD.
*/
- #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_BD_LONG_FLAGS_SFT 6
- uint16_t flags_type;
-
+ #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40)
/*
- * This is the length of the host physical buffer this BD describes in
- * bytes. This field must be valid on all BDs of a packet.
+ * If set to 1, the device will not generate a completion for
+	 * this transmit packet unless there is an error in its
+ * processing. If this bit is set to 0, then the packet will be
+ * completed normally. This bit must be valid only on the first
+ * BD of a packet.
*/
+ #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80)
+ /*
+ * This value indicates how many 16B BD locations are consumed
+ * in the ring by this packet. A value of 1 indicates that this
+	 * BD is the only BD (and that it is a short BD). A value of
+ * 3 indicates either 3 short BDs or 1 long BD and one short BD
+ * in the packet. A value of 0 indicates that there are 32 BD
+ * locations in the packet (the maximum). This field is valid
+ * only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00)
+ #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8
+ /*
+ * This value is a hint for the length of the entire packet. It
+ * is used by the chip to optimize internal processing. The
+ * packet will be dropped if the hint is too short. This field
+ * is valid only on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000)
+ #define TX_BD_LONG_FLAGS_LHINT_SFT 13
+ /* indicates packet length < 512B */
+ #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13)
+ /* indicates 512 <= packet length < 1KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13)
+ /* indicates 1KB <= packet length < 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13)
+ /* indicates packet length >= 2KB */
+ #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13)
+ #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K
+ /*
+ * If set to 1, the device immediately updates the Send Consumer
+ * Index after the buffer associated with this descriptor has
+ * been transferred via DMA to NIC memory from host memory. An
+ * interrupt may or may not be generated according to the state
+ * of the interrupt avoidance mechanisms. If this bit is set to
+ * 0, then the Consumer Index is only updated as soon as one of
+ * the host interrupt coalescing conditions has been met. This
+ * bit must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000)
+ /*
+ * All bits in this field must be valid on the first BD of a
+ * packet. Only the packet_end bit must be valid for the
+ * remaining BDs of a packet.
+ */
+ #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_LONG_FLAGS_SFT 6
uint16_t len;
-
/*
- * The opaque data field is pass through to the completion and can be
- * used for any data that the driver wants to associate with the
- * transmit BD. This field must be valid on the first BD of a packet.
+ * This is the length of the host physical buffer this BD
+ * describes in bytes. This field must be valid on all BDs of a
+ * packet.
*/
uint32_t opaque;
-
/*
- * This is the host physical address for the portion of the packet
- * described by this TX BD. This value must be valid on all BDs of a
- * packet.
+	 * The opaque data field is passed through to the completion and
+ * can be used for any data that the driver wants to associate
+ * with the transmit BD. This field must be valid on the first
+ * BD of a packet.
*/
uint64_t addr;
+ /*
+ * This is the host physical address for the portion of the
+ * packet described by this TX BD. This value must be valid on
+ * all BDs of a packet.
+ */
} __attribute__((packed));
/* last 16 bytes of Long TX BD */
-
struct tx_bd_long_hi {
+ uint16_t lflags;
/*
- * All bits in this field must be valid on the first BD of a packet.
- * Their value on other BDs of the packet will be ignored.
+ * All bits in this field must be valid on the first BD of a
+ * packet. Their value on other BDs of the packet will be
+ * ignored.
*/
/*
- * If set to 1, the controller replaces the TCP/UPD checksum fields of
- * normal TCP/UPD checksum, or the inner TCP/UDP checksum field of the
- * encapsulated TCP/UDP packets with the hardware calculated TCP/UDP
- * checksum for the packet associated with this descriptor. This bit
- * must be valid on the first BD of a packet.
+	 * If set to 1, the controller replaces the TCP/UDP checksum
+	 * fields of normal TCP/UDP checksum, or the inner TCP/UDP
+ * checksum field of the encapsulated TCP/UDP packets with the
+ * hardware calculated TCP/UDP checksum for the packet
+ * associated with this descriptor. The flag is ignored if the
+ * LSO flag is set. This bit must be valid on the first BD of a
+ * packet.
*/
#define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
/*
- * If set to 1, the controller replaces the IP checksum of the normal
- * packets, or the inner IP checksum of the encapsulated packets with
- * the hardware calculated IP checksum for the packet associated with
- * this descriptor. This bit must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
- /*
- * If set to 1, the controller will not append an Ethernet CRC to the
- * end of the frame. This bit must be valid on the first BD of a packet.
- * Packet must be 64B or longer when this flag is set. It is not useful
- * to use this bit with any form of TX offload such as CSO or LSO. The
- * intent is that the packet from the host already has a valid Ethernet
- * CRC on the packet.
- */
- #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
- /*
- * If set to 1, the device will record the time at which the packet was
- * actually transmitted at the TX MAC. This bit must be valid on the
- * first BD of a packet.
- */
- #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
- /*
- * If set to 1, The controller replaces the tunnel IP checksum field
- * with hardware calculated IP checksum for the IP header of the packet
- * associated with this descriptor. In case of VXLAN, the controller
- * also replaces the outer header UDP checksum with hardware calculated
- * UDP checksum for the packet associated with this descriptor.
- */
- #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
- /*
- * If set to 1, the device will treat this packet with LSO(Large Send
- * Offload) processing for both normal or encapsulated packets, which is
- * a form of TCP segmentation. When this bit is 1, the hdr_size and mss
- * fields must be valid. The driver doesn't need to set t_ip_chksum,
- * ip_chksum, and tcp_udp_chksum flags since the controller will replace
- * the appropriate checksum fields for segmented packets. When this bit
- * is 1, the hdr_size and mss fields must be valid.
- */
- #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
- /*
- * If set to zero when LSO is '1', then the IPID will be treated as a
- * 16b number and will be wrapped if it exceeds a value of 0xffff. If
- * set to one when LSO is '1', then the IPID will be treated as a 15b
- * number and will be wrapped if it exceeds a value 0f 0x7fff.
- */
- #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
- /*
- * If set to zero when LSO is '1', then the IPID of the tunnel IP header
- * will not be modified during LSO operations. If set to one when LSO is
- * '1', then the IPID of the tunnel IP header will be incremented for
- * each subsequent segment of an LSO operation.
- */
- #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
- /*
- * If set to '1', then the RoCE ICRC will be appended to the packet.
- * Packet must be a valid RoCE format packet.
- */
- #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
- /*
- * If set to '1', then the FCoE CRC will be appended to the packet.
- * Packet must be a valid FCoE format packet.
- */
- #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
- uint16_t lflags;
-
- /*
- * When LSO is '1', this field must contain the offset of the TCP
- * payload from the beginning of the packet in as 16b words. In case of
- * encapsulated/tunneling packet, this field contains the offset of the
- * inner TCP payload from beginning of the packet as 16-bit words. This
- * value must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
- #define TX_BD_LONG_HDR_SIZE_SFT 0
+ * If set to 1, the controller replaces the IP checksum of the
+ * normal packets, or the inner IP checksum of the encapsulated
+ * packets with the hardware calculated IP checksum for the
+ * packet associated with this descriptor. This bit must be
+ * valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ /*
+ * If set to 1, the controller will not append an Ethernet CRC
+ * to the end of the frame. This bit must be valid on the first
+ * BD of a packet. Packet must be 64B or longer when this flag
+ * is set. It is not useful to use this bit with any form of TX
+ * offload such as CSO or LSO. The intent is that the packet
+ * from the host already has a valid Ethernet CRC on the packet.
+ */
+ #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4)
+ /*
+ * If set to 1, the device will record the time at which the
+ * packet was actually transmitted at the TX MAC. This bit must
+ * be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8)
+ /*
+ * If set to 1, The controller replaces the tunnel IP checksum
+ * field with hardware calculated IP checksum for the IP header
+ * of the packet associated with this descriptor. For outer UDP
+ * checksum, global outer UDP checksum TE_NIC register needs to
+ * be enabled. If the global outer UDP checksum TE_NIC register
+ * bit is set, outer UDP checksum will be calculated for the
+ * following cases: 1. Packets with tcp_udp_chksum flag set to
+ * offload checksum for inner packet AND the inner packet is
+ * TCP/UDP. If the inner packet is ICMP for example (non-
+ * TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
+ * checksum will not be calculated. 2. Packets with lso flag set
+ * which implies inner TCP checksum calculation as part of LSO
+ * operation.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, the device will treat this packet with LSO(Large
+ * Send Offload) processing for both normal or encapsulated
+ * packets, which is a form of TCP segmentation. When this bit
+ * is 1, the hdr_size and mss fields must be valid. The driver
+ * doesn't need to set t_ip_chksum, ip_chksum, and
+ * tcp_udp_chksum flags since the controller will replace the
+ * appropriate checksum fields for segmented packets. When this
+ * bit is 1, the hdr_size and mss fields must be valid.
+ */
+ #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20)
+ /*
+ * If set to zero when LSO is '1', then the IPID will be treated
+ * as a 16b number and will be wrapped if it exceeds a value of
+ * 0xffff. If set to one when LSO is '1', then the IPID will be
+ * treated as a 15b number and will be wrapped if it exceeds a
+	 * value of 0x7fff.
+ */
+ #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
+ /*
+ * If set to zero when LSO is '1', then the IPID of the tunnel
+ * IP header will not be modified during LSO operations. If set
+ * to one when LSO is '1', then the IPID of the tunnel IP header
+ * will be incremented for each subsequent segment of an LSO
+ * operation. The flag is ignored if the LSO packet is a normal
+ * (non-tunneled) TCP packet.
+ */
+ #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
+ */
+ #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200)
uint16_t hdr_size;
-
/*
- * This is the MSS value that will be used to do the LSO processing. The
- * value is the length in bytes of the TCP payload for each segment
- * generated by the LSO operation. This value must be valid on the first
+ * When LSO is '1', this field must contain the offset of the
+ * TCP payload from the beginning of the packet in as 16b words.
+ * In case of encapsulated/tunneling packet, this field contains
+ * the offset of the inner TCP payload from beginning of the
+ * packet as 16-bit words. This value must be valid on the first
* BD of a packet.
*/
- #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
- #define TX_BD_LONG_MSS_SFT 0
+ #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff)
+ #define TX_BD_LONG_HDR_SIZE_SFT 0
uint32_t mss;
-
- uint16_t unused_2;
-
/*
- * This value selects a CFA action to perform on the packet. Set this
- * value to zero if no CFA action is desired. This value must be valid
- * on the first BD of a packet.
+ * This is the MSS value that will be used to do the LSO
+ * processing. The value is the length in bytes of the TCP
+ * payload for each segment generated by the LSO operation. This
+ * value must be valid on the first BD of a packet.
*/
+ #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff)
+ #define TX_BD_LONG_MSS_SFT 0
+ uint16_t unused_2;
uint16_t cfa_action;
-
/*
- * This value is action meta-data that defines CFA edit operations that
- * are done in addition to any action editing.
+ * This value selects a CFA action to perform on the packet. Set
+ * this value to zero if no CFA action is desired. This value
+ * must be valid on the first BD of a packet.
+ */
+ uint32_t cfa_meta;
+ /*
+ * This value is action meta-data that defines CFA edit
+ * operations that are done in addition to any action editing.
*/
/* When key=1, This is the VLAN tag VID value. */
#define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
#define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0
/* When key=1, This is the VLAN tag DE value. */
- #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
+ #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000)
/* When key=1, This is the VLAN tag PRI value. */
#define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
#define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13
/* When key=1, This is the VLAN tag TPID select value. */
#define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
#define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16
- /* 0x88a8 */
+ /* 0x88a8 */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
- /* 0x8100 */
+ /* 0x8100 */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
- /* 0x9100 */
+ /* 0x9100 */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
- /* 0x9200 */
+ /* 0x9200 */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
- /* 0x9300 */
+ /* 0x9300 */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
- /* Value programmed in CFA VLANTPID register. */
+ /* Value programmed in CFA VLANTPID register. */
#define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
- #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
- TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
+ #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \
+ TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG
/* When key=1, This is the VLAN tag TPID select value. */
#define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
#define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19
/*
- * This field identifies the type of edit to be performed on the packet.
- * This value must be valid on the first BD of a packet.
- */
- #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
- #define TX_BD_LONG_CFA_META_KEY_SFT 28
- /* No editing */
- #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
- /*
- * - meta[17:16] - TPID select value (0 = 0x8100). - meta[15:12]
- * - PRI/DE value. - meta[11:0] - VID value.
- */
+ * This field identifies the type of edit to be performed on the
+ * packet. This value must be valid on the first BD of a packet.
+ */
+ #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define TX_BD_LONG_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ /*
+ * - meta[17:16] - TPID select value (0 =
+ * 0x8100). - meta[15:12] - PRI/DE value. -
+ * meta[11:0] - VID value.
+ */
#define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
#define TX_BD_LONG_CFA_META_KEY_LAST TX_BD_LONG_CFA_META_KEY_VLAN_TAG
- uint32_t cfa_meta;
} __attribute__((packed));
/* RX Producer Packet BD (16 bytes) */
struct rx_prod_pkt_bd {
+ uint16_t flags_type;
/* This value identifies the type of buffer descriptor. */
- #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
- #define RX_PROD_PKT_BD_TYPE_SFT 0
- /*
- * Indicates that this BD is 16B long and is an RX Producer (ie.
- * empty) buffer descriptor.
- */
- #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT (UINT32_C(0x4) << 0)
+ #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PROD_PKT_BD_TYPE_SFT 0
+ /*
+ * Indicates that this BD is 16B long and is an
+ * RX Producer (ie. empty) buffer descriptor.
+ */
+ #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT UINT32_C(0x4)
/*
- * If set to 1, the packet will be placed at the address plus 2B. The 2
- * Bytes of padding will be written as zero.
+ * If set to 1, the packet will be placed at the address plus
+ * 2B. The 2 Bytes of padding will be written as zero.
*/
/*
- * This is intended to be used when the host buffer is cache-line
- * aligned to produce packets that are easy to parse in host memory
- * while still allowing writes to be cache line aligned.
+ * This is intended to be used when the host buffer is cache-
+ * line aligned to produce packets that are easy to parse in
+ * host memory while still allowing writes to be cache line
+ * aligned.
*/
- #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
+ #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40)
/*
- * If set to 1, the packet write will be padded out to the nearest
- * cache-line with zero value padding.
+ * If set to 1, the packet write will be padded out to the
+ * nearest cache-line with zero value padding.
*/
/*
- * If receive buffers start/end on cache-line boundaries, this feature
- * will ensure that all data writes on the PCI bus start/end on cache
- * line boundaries.
+ * If receive buffers start/end on cache-line boundaries, this
+ * feature will ensure that all data writes on the PCI bus
+ * start/end on cache line boundaries.
*/
- #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
+ #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80)
/*
- * This value is the number of additional buffers in the ring that
- * describe the buffer space to be consumed for the this packet. If the
- * value is zero, then the packet must fit within the space described by
- * this BD. If this value is 1 or more, it indicates how many additional
- * "buffer" BDs are in the ring immediately following this BD to be used
- * for the same network packet. Even if the packet to be placed does not
- * need all the additional buffers, they will be consumed anyway.
+ * This value is the number of additional buffers in the ring
+ * that describe the buffer space to be consumed for the this
+ * packet. If the value is zero, then the packet must fit within
+ * the space described by this BD. If this value is 1 or more,
+ * it indicates how many additional "buffer" BDs are in the ring
+ * immediately following this BD to be used for the same network
+ * packet. Even if the packet to be placed does not need all the
+ * additional buffers, they will be consumed anyway.
*/
#define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300)
#define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8
- #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_PROD_PKT_BD_FLAGS_SFT 6
- uint16_t flags_type;
-
+ #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PROD_PKT_BD_FLAGS_SFT 6
+ uint16_t len;
/*
- * This is the length in Bytes of the host physical buffer where data
- * for the packet may be placed in host memory.
+ * This is the length in Bytes of the host physical buffer where
+ * data for the packet may be placed in host memory.
*/
/*
- * While this is a Byte resolution value, it is often advantageous to
- * ensure that the buffers provided end on a host cache line.
+ * While this is a Byte resolution value, it is often
+ * advantageous to ensure that the buffers provided end on a
+ * host cache line.
*/
- uint16_t len;
-
+ uint32_t opaque;
/*
- * The opaque data field is pass through to the completion and can be
- * used for any data that the driver wants to associate with this
- * receive buffer set.
+	 * The opaque data field is passed through to the completion and
+ * can be used for any data that the driver wants to associate
+ * with this receive buffer set.
*/
- uint32_t opaque;
-
+ uint64_t addr;
/*
- * This is the host physical address where data for the packet may by
- * placed in host memory.
+ * This is the host physical address where data for the packet
+	 * may be placed in host memory.
*/
/*
- * While this is a Byte resolution value, it is often advantageous to
- * ensure that the buffers provide start on a host cache line.
+ * While this is a Byte resolution value, it is often
+	 * advantageous to ensure that the buffers provided start on a
+ * host cache line.
*/
- uint64_t addr;
} __attribute__((packed));
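/*
 * An illustrative sketch, assuming only the flag macros above (the producer
 * packet BD structure itself is declared earlier in this header): composing
 * the flags half of a BD's flags_type word.  "nbufs" is a hypothetical count
 * of additional buffer BDs that follow for the same packet.
 */
static inline uint16_t
rx_prod_pkt_bd_flags(unsigned int nbufs, int sop_pad, int eop_pad)
{
	uint16_t flags = 0;

	if (sop_pad)
		flags |= RX_PROD_PKT_BD_FLAGS_SOP_PAD;
	if (eop_pad)
		flags |= RX_PROD_PKT_BD_FLAGS_EOP_PAD;
	/* Additional buffer BDs consumed by this packet (0 = fits in this BD). */
	flags |= (nbufs << RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT) &
		 RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK;

	return flags;
}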
/* Completion Ring Structures */
/* Note: This structure is used by the HWRM to communicate HWRM Error. */
/* Base Completion Record (16 bytes) */
struct cmpl_base {
+ uint16_t type;
/* unused is 10 b */
/*
- * This field indicates the exact type of the completion. By convention,
- * the LSB identifies the length of the record in 16B units. Even values
- * indicate 16B records. Odd values indicate 32B records.
- */
- #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
- #define CMPL_BASE_TYPE_SFT 0
- /* TX L2 completion: Completion of TX packet. Length = 16B */
- #define CMPL_BASE_TYPE_TX_L2 (UINT32_C(0x0) << 0)
- /*
- * RX L2 completion: Completion of and L2 RX packet.
- * Length = 32B
- */
- #define CMPL_BASE_TYPE_RX_L2 (UINT32_C(0x11) << 0)
- /*
- * RX Aggregation Buffer completion : Completion of an L2
- * aggregation buffer in support of TPA, HDS, or Jumbo packet
- * completion. Length = 16B
- */
- #define CMPL_BASE_TYPE_RX_AGG (UINT32_C(0x12) << 0)
- /*
- * RX L2 TPA Start Completion: Completion at the beginning of a
- * TPA operation. Length = 32B
- */
- #define CMPL_BASE_TYPE_RX_TPA_START (UINT32_C(0x13) << 0)
- /*
- * RX L2 TPA End Completion: Completion at the end of a TPA
- * operation. Length = 32B
- */
- #define CMPL_BASE_TYPE_RX_TPA_END (UINT32_C(0x15) << 0)
- /*
- * Statistics Ejection Completion: Completion of statistics data
- * ejection buffer. Length = 16B
- */
- #define CMPL_BASE_TYPE_STAT_EJECT (UINT32_C(0x1a) << 0)
- /* HWRM Command Completion: Completion of an HWRM command. */
- #define CMPL_BASE_TYPE_HWRM_DONE (UINT32_C(0x20) << 0)
- /* Forwarded HWRM Request */
- #define CMPL_BASE_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0)
- /* Forwarded HWRM Response */
- #define CMPL_BASE_TYPE_HWRM_FWD_RESP (UINT32_C(0x24) << 0)
- /* HWRM Asynchronous Event Information */
- #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (UINT32_C(0x2e) << 0)
- /* CQ Notification */
- #define CMPL_BASE_TYPE_CQ_NOTIFICATION (UINT32_C(0x30) << 0)
- /* SRQ Threshold Event */
- #define CMPL_BASE_TYPE_SRQ_EVENT (UINT32_C(0x32) << 0)
- /* DBQ Threshold Event */
- #define CMPL_BASE_TYPE_DBQ_EVENT (UINT32_C(0x34) << 0)
- /* QP Async Notification */
- #define CMPL_BASE_TYPE_QP_EVENT (UINT32_C(0x38) << 0)
- /* Function Async Notification */
- #define CMPL_BASE_TYPE_FUNC_EVENT (UINT32_C(0x3a) << 0)
- uint16_t type;
-
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f)
+ #define CMPL_BASE_TYPE_SFT 0
+ /* TX L2 completion: Completion of TX packet. Length = 16B */
+ #define CMPL_BASE_TYPE_TX_L2 UINT32_C(0x0)
+ /*
+	 * RX L2 completion: Completion of an L2 RX
+ * packet. Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_L2 UINT32_C(0x11)
+ /*
+ * RX Aggregation Buffer completion : Completion
+ * of an L2 aggregation buffer in support of
+ * TPA, HDS, or Jumbo packet completion. Length
+ * = 16B
+ */
+ #define CMPL_BASE_TYPE_RX_AGG UINT32_C(0x12)
+ /*
+ * RX L2 TPA Start Completion: Completion at the
+ * beginning of a TPA operation. Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_TPA_START UINT32_C(0x13)
+ /*
+ * RX L2 TPA End Completion: Completion at the
+ * end of a TPA operation. Length = 32B
+ */
+ #define CMPL_BASE_TYPE_RX_TPA_END UINT32_C(0x15)
+ /*
+ * Statistics Ejection Completion: Completion of
+ * statistics data ejection buffer. Length = 16B
+ */
+ #define CMPL_BASE_TYPE_STAT_EJECT UINT32_C(0x1a)
+ /* HWRM Command Completion: Completion of an HWRM command. */
+ #define CMPL_BASE_TYPE_HWRM_DONE UINT32_C(0x20)
+ /* Forwarded HWRM Request */
+ #define CMPL_BASE_TYPE_HWRM_FWD_REQ UINT32_C(0x22)
+ /* Forwarded HWRM Response */
+ #define CMPL_BASE_TYPE_HWRM_FWD_RESP UINT32_C(0x24)
+ /* HWRM Asynchronous Event Information */
+ #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ /* CQ Notification */
+ #define CMPL_BASE_TYPE_CQ_NOTIFICATION UINT32_C(0x30)
+ /* SRQ Threshold Event */
+ #define CMPL_BASE_TYPE_SRQ_EVENT UINT32_C(0x32)
+ /* DBQ Threshold Event */
+ #define CMPL_BASE_TYPE_DBQ_EVENT UINT32_C(0x34)
+ /* QP Async Notification */
+ #define CMPL_BASE_TYPE_QP_EVENT UINT32_C(0x38)
+ /* Function Async Notification */
+ #define CMPL_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
+ /* unused is 10 b */
uint16_t info1;
+ /* info1 is 16 b */
uint32_t info2;
-
+ /* info2 is 32 b */
+ uint32_t info3_v;
+ /* info3 is 31 b */
/*
- * This value is written by the NIC such that it will be different for
- * each pass through the completion queue. The even passes will write 1.
- * The odd passes will write 0.
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
*/
- #define CMPL_BASE_V UINT32_C(0x1)
+ #define CMPL_BASE_V UINT32_C(0x1)
/* info3 is 31 b */
- #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
- #define CMPL_BASE_INFO3_SFT 1
- uint32_t info3_v;
-
+ #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe)
+ #define CMPL_BASE_INFO3_SFT 1
uint32_t info4;
+ /* info4 is 32 b */
} __attribute__((packed));
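/*
 * Illustrative sketch, assuming the cmpl_base layout and macros above and a
 * little-endian host (byte-order conversion omitted): testing the valid bit
 * and extracting the record type.  "phase" is a hypothetical per-ring value
 * the driver toggles between 1 and 0 each time it wraps the completion ring,
 * matching the even/odd pass behaviour described for CMPL_BASE_V.
 */
static inline int
cmpl_base_valid(const struct cmpl_base *cmp, uint32_t phase)
{
	return (cmp->info3_v & CMPL_BASE_V) == phase;
}

static inline uint16_t
cmpl_base_record_type(const struct cmpl_base *cmp)
{
	/* The LSB also encodes the record size: even = 16B, odd = 32B. */
	return cmp->type & CMPL_BASE_TYPE_MASK;
}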
/* TX Completion Record (16 bytes) */
struct tx_cmpl {
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By convention,
- * the LSB identifies the length of the record in 16B units. Even values
- * indicate 16B records. Odd values indicate 32B records.
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
*/
- #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define TX_CMPL_TYPE_SFT 0
- /* TX L2 completion: Completion of TX packet. Length = 16B */
- #define TX_CMPL_TYPE_TX_L2 (UINT32_C(0x0) << 0)
+ #define TX_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define TX_CMPL_TYPE_SFT 0
+ /* TX L2 completion: Completion of TX packet. Length = 16B */
+ #define TX_CMPL_TYPE_TX_L2 UINT32_C(0x0)
/*
- * When this bit is '1', it indicates a packet that has an error of some
- * type. Type of error is indicated in error_flags.
+ * When this bit is '1', it indicates a packet that has an error
+ * of some type. Type of error is indicated in error_flags.
*/
- #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40)
/*
- * When this bit is '1', it indicates that the packet completed was
- * transmitted using the push acceleration data provided by the driver.
- * When this bit is '0', it indicates that the packet had not push
- * acceleration data written or was executed as a normal packet even
- * though push data was provided.
+ * When this bit is '1', it indicates that the packet completed
+ * was transmitted using the push acceleration data provided by
+ * the driver. When this bit is '0', it indicates that the
+	 * packet had no push acceleration data written or was executed
+ * as a normal packet even though push data was provided.
*/
- #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
- #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define TX_CMPL_FLAGS_SFT 6
- uint16_t flags_type;
-
+ #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80)
+ #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_CMPL_FLAGS_SFT 6
uint16_t unused_0;
-
+ /* unused1 is 16 b */
+ uint32_t opaque;
/*
- * This is a copy of the opaque field from the first TX BD of this
- * transmitted packet.
+ * This is a copy of the opaque field from the first TX BD of
+ * this transmitted packet.
*/
- uint32_t opaque;
-
+ uint16_t errors_v;
/*
- * This value is written by the NIC such that it will be different for
- * each pass through the completion queue. The even passes will write 1.
- * The odd passes will write 0.
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
*/
- #define TX_CMPL_V UINT32_C(0x1)
+ #define TX_CMPL_V UINT32_C(0x1)
/*
- * This error indicates that there was some sort of problem with the BDs
- * for the packet.
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet.
*/
#define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
- #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
- /* No error */
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /* No error */
#define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1)
- /* Bad Format: BDs were not formatted correctly. */
+ /* Bad Format: BDs were not formatted correctly. */
#define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1)
- #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
- TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
+ #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT
/*
- * When this bit is '1', it indicates that the length of the packet was
- * zero. No packet was transmitted.
+ * When this bit is '1', it indicates that the length of the
+ * packet was zero. No packet was transmitted.
*/
- #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
+ #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10)
/*
- * When this bit is '1', it indicates that the packet was longer than
- * the programmed limit in TDI. No packet was transmitted.
+ * When this bit is '1', it indicates that the packet was longer
+ * than the programmed limit in TDI. No packet was transmitted.
*/
#define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20)
/*
- * When this bit is '1', it indicates that one or more of the BDs
- * associated with this packet generated a PCI error. This probably
- * means the address was not valid.
+ * When this bit is '1', it indicates that one or more of the
+ * BDs associated with this packet generated a PCI error. This
+ * probably means the address was not valid.
*/
- #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
+ #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40)
/*
- * When this bit is '1', it indicates that the packet was longer than
- * indicated by the hint. No packet was transmitted.
+ * When this bit is '1', it indicates that the packet was longer
+ * than indicated by the hint. No packet was transmitted.
*/
- #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
+ #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80)
/*
- * When this bit is '1', it indicates that the packet was dropped due to
- * Poison TLP error on one or more of the TLPs in the PXP completion.
+ * When this bit is '1', it indicates that the packet was
+ * dropped due to Poison TLP error on one or more of the TLPs in
+ * the PXP completion.
*/
- #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
- #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define TX_CMPL_ERRORS_SFT 1
- uint16_t errors_v;
-
+ #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100)
+ #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define TX_CMPL_ERRORS_SFT 1
uint16_t unused_1;
+ /* unused2 is 16 b */
uint32_t unused_2;
-} __attribute__((packed)) tx_cmpl_t, *ptx_cmpl_t;
+ /* unused3 is 32 b */
+} __attribute__((packed));
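/*
 * Illustrative sketch, assuming the tx_cmpl layout and macros above with
 * byte-order conversion omitted: classifying a TX completion.  "phase" is
 * the valid-bit value expected for the current pass through the ring.
 */
static inline int
tx_cmpl_status(const struct tx_cmpl *txc, uint16_t phase)
{
	if ((txc->errors_v & TX_CMPL_V) != phase)
		return -1;	/* entry not yet written by the NIC */
	if (txc->flags_type & TX_CMPL_FLAGS_ERROR)
		return 0;	/* failed; details in errors_v (TX_CMPL_ERRORS_*) */
	return 1;		/* clean completion */
}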
/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */
struct rx_pkt_cmpl {
+ uint16_t flags_type;
/*
- * This field indicates the exact type of the completion. By convention,
- * the LSB identifies the length of the record in 16B units. Even values
- * indicate 16B records. Odd values indicate 32B records.
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
*/
- #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define RX_PKT_CMPL_TYPE_SFT 0
- /*
- * RX L2 completion: Completion of and L2 RX packet.
- * Length = 32B
- */
- #define RX_PKT_CMPL_TYPE_RX_L2 (UINT32_C(0x11) << 0)
+ #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_PKT_CMPL_TYPE_SFT 0
/*
- * When this bit is '1', it indicates a packet that has an error of some
- * type. Type of error is indicated in error_flags.
+	 * RX L2 completion: Completion of an L2 RX
+ * packet. Length = 32B
*/
- #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ /*
+ * When this bit is '1', it indicates a packet that has an error
+ * of some type. Type of error is indicated in error_flags.
+ */
+ #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40)
/* This field indicates how the packet was placed in the buffer. */
#define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
- /* Normal: Packet was placed using normal algorithm. */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7
+ /* Normal: Packet was placed using normal algorithm. */
#define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7)
- /* Jumbo: Packet was placed using jumbo algorithm. */
+ /* Jumbo: Packet was placed using jumbo algorithm. */
#define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
- /*
- * Header/Data Separation: Packet was placed using Header/Data
- * separation algorithm. The separation location is indicated by
- * the itype field.
- */
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
- #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \
- RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_CMPL_FLAGS_PLACEMENT_HDS
/* This bit is '1' if the RSS field in this completion is valid. */
- #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /* unused is 1 b */
/*
- * This value indicates what the inner packet determined for the packet
- * was.
+ * This value indicates what the inner packet determined for the
+ * packet was.
*/
- #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
- #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
- /* Not Known: Indicates that the packet type was not known. */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12
+ /* Not Known: Indicates that the packet type was not known. */
#define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12)
- /*
- * IP Packet: Indicates that the packet was an IP packet, but
- * further classification was not possible.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12)
- /*
- * TCP Packet: Indicates that the packet was IP and TCP. This
- * indicates that the payload_offset field is valid.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
- /*
- * UDP Packet: Indicates that the packet was IP and UDP. This
- * indicates that the payload_offset field is valid.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12)
- /*
- * FCoE Packet: Indicates that the packet was recognized as a
- * FCoE. This also indicates that the payload_offset field is
- * valid.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12)
- /*
- * RoCE Packet: Indicates that the packet was recognized as a
- * RoCE. This also indicates that the payload_offset field is
- * valid.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12)
- /*
- * ICMP Packet: Indicates that the packet was recognized as
- * ICMP. This indicates that the payload_offset field is valid.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12)
- /*
- * PtP packet wo/timestamp: Indicates that the packet was
- * recognized as a PtP packet.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \
- (UINT32_C(0x8) << 12)
- /*
- * PtP packet w/timestamp: Indicates that the packet was
- * recognized as a PtP packet and that a timestamp was taken for
- * the packet.
- */
- #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
- #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \
- RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
- #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
- #define RX_PKT_CMPL_FLAGS_SFT 6
- uint16_t flags_type;
-
/*
- * This is the length of the data for the packet stored in the buffer(s)
- * identified by the opaque value. This includes the packet BD and any
- * associated buffer BDs. This does not include the the length of any
- * data places in aggregation BDs.
+ * IP Packet: Indicates that the packet was an
+ * IP packet, but further classification was not
+ * possible.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12)
+ /*
+ * TCP Packet: Indicates that the packet was IP
+ * and TCP. This indicates that the
+ * payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ /*
+ * UDP Packet: Indicates that the packet was IP
+ * and UDP. This indicates that the
+ * payload_offset field is valid.
*/
+ #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12)
+ /*
+ * FCoE Packet: Indicates that the packet was
+ * recognized as a FCoE. This also indicates
+ * that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12)
+ /*
+ * RoCE Packet: Indicates that the packet was
+ * recognized as a RoCE. This also indicates
+ * that the payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12)
+ /*
+ * ICMP Packet: Indicates that the packet was
+ * recognized as ICMP. This indicates that the
+ * payload_offset field is valid.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12)
+ /*
+ * PtP packet wo/timestamp: Indicates that the
+ * packet was recognized as a PtP packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 12)
+ /*
+ * PtP packet w/timestamp: Indicates that the
+ * packet was recognized as a PtP packet and
+ * that a timestamp was taken for the packet.
+ */
+ #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12)
+ #define RX_PKT_CMPL_FLAGS_ITYPE_LAST RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_PKT_CMPL_FLAGS_SFT 6
uint16_t len;
-
/*
- * This is a copy of the opaque field from the RX BD this completion
- * corresponds to.
+ * This is the length of the data for the packet stored in the
+ * buffer(s) identified by the opaque value. This includes the
+ * packet BD and any associated buffer BDs. This does not
+	 * include the length of any data placed in aggregation BDs.
*/
uint32_t opaque;
-
/*
- * This value is written by the NIC such that it will be different for
- * each pass through the completion queue. The even passes will write 1.
- * The odd passes will write 0.
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
*/
- #define RX_PKT_CMPL_V1 UINT32_C(0x1)
+ uint8_t agg_bufs_v1;
+ /* unused1 is 2 b */
/*
- * This value is the number of aggregation buffers that follow this
- * entry in the completion ring that are a part of this packet. If the
- * value is zero, then the packet is completely contained in the buffer
- * space provided for the packet in the RX ring.
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
*/
- #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
- #define RX_PKT_CMPL_AGG_BUFS_SFT 1
- uint8_t agg_bufs_v1;
-
+ #define RX_PKT_CMPL_V1 UINT32_C(0x1)
/*
- * This is the RSS hash type for the packet. The value is packed
- * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}.
+ * This value is the number of aggregation buffers that follow
+ * this entry in the completion ring that are a part of this
+ * packet. If the value is zero, then the packet is completely
+ * contained in the buffer space provided for the packet in the
+ * RX ring.
*/
+ #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e)
+ #define RX_PKT_CMPL_AGG_BUFS_SFT 1
+ /* unused1 is 2 b */
uint8_t rss_hash_type;
-
/*
- * This value indicates the offset from the beginning of the packet
- * where the inner payload starts. This value is valid for TCP, UDP,
- * FCoE, and RoCE packets.
+	 * This is the RSS hash type for the packet. The value is packed
+	 * {tuple_extract_op[1:0], rss_profile_id[4:0], tuple_extract_op[2]}.
+	 * The value of tuple_extract_op indicates what fields the hash was
+	 * computed on:
+	 * 0: source IP address, destination IP address, source port, and
+	 *    destination port of the inner IP and TCP or UDP headers.
+	 * 1: source IP address and destination IP address of the inner IP
+	 *    header.
+	 * 2: source IP address, destination IP address, source port, and
+	 *    destination port of the IP and TCP or UDP headers of the outer
+	 *    tunnel headers. Not applicable for non-tunneled packets.
+	 * 3: source IP address and destination IP address of the IP header
+	 *    of the outer tunnel headers. Not applicable for non-tunneled
+	 *    packets.
+	 * For values 0 and 1, the headers of a non-tunneled packet are
+	 * considered inner packet headers for the RSS hash computation.
+	 * Note that the 4-tuple values listed above are applicable for
+	 * layer 4 protocols supported and enabled for RSS in the hardware,
+	 * HWRM firmware, and drivers. For example, if RSS hash is supported
+	 * and enabled for TCP traffic only, then the values of
+	 * tuple_extract_op corresponding to 4-tuples are only valid for TCP
+	 * traffic.
*/
uint8_t payload_offset;
-
- uint8_t unused_1;
-
/*
- * This value is the RSS hash value calculated for the packet based on
- * the mode bits and key value in the VNIC.
+ * This value indicates the offset in bytes from the beginning
+ * of the packet where the inner payload starts. This value is
+ * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
+	 * indicates that the payload starts 256B into the packet.
*/
+ uint8_t unused_1;
+ /* unused2 is 8 b */
uint32_t rss_hash;
+ /*
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
+ */
} __attribute__((packed));
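/*
 * Illustrative sketch, assuming the rx_pkt_cmpl layout and macros above
 * (byte-order conversion omitted): the fields an RX burst routine typically
 * pulls from the first 16 bytes of an RX packet completion.
 */
static inline unsigned int
rx_pkt_cmpl_agg_bufs(const struct rx_pkt_cmpl *rxc)
{
	/* Aggregation-buffer completions that follow this entry. */
	return (rxc->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
	       RX_PKT_CMPL_AGG_BUFS_SFT;
}

static inline uint16_t
rx_pkt_cmpl_itype(const struct rx_pkt_cmpl *rxc)
{
	/*
	 * Masked but not shifted, so the result compares directly against
	 * the RX_PKT_CMPL_FLAGS_ITYPE_* values defined above.
	 */
	return rxc->flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK;
}

static inline int
rx_pkt_cmpl_rss_hash(const struct rx_pkt_cmpl *rxc, uint32_t *hash)
{
	if (!(rxc->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID))
		return 0;	/* rss_hash field is not valid for this packet */
	*hash = rxc->rss_hash;
	return 1;
}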
/* last 16 bytes of RX Packet Completion Record */
struct rx_pkt_cmpl_hi {
+ uint32_t flags2;
/*
- * This indicates that the ip checksum was calculated for the inner
- * packet and that the ip_cs_error field indicates if there was an
- * error.
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the ip_cs_error field indicates if
+ * there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
/*
- * This indicates that the TCP, UDP or ICMP checksum was calculated for
- * the inner packet and that the l4_cs_error field indicates if there
- * was an error.
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the l4_cs_error
+ * field indicates if there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
- * This indicates that the ip checksum was calculated for the tunnel
- * header and that the t_ip_cs_error field indicates if there was an
- * error.
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the t_ip_cs_error field indicates if
+ * there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
- * This indicates that the UDP checksum was calculated for the tunnel
- * packet and that the t_l4_cs_error field indicates if there was an
- * error.
+ * This indicates that the UDP checksum was calculated for the
+ * tunnel packet and that the t_l4_cs_error field indicates if
+ * there was an error.
*/
- #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
/* This value indicates what format the metadata field is. */
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
+	/* No metadata information. Value is zero. */
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
- /*
- * The metadata field contains the VLAN tag and TPID value. -
- * metadata[11:0] contains the vlan VID value. - metadata[12]
- * contains the vlan DE value. - metadata[15:13] contains the
- * vlan PRI value. - metadata[31:16] contains the vlan TPID
- * value.
- */
+ /*
+ * The metadata field contains the VLAN tag and
+ * TPID value. - metadata[11:0] contains the
+ * vlan VID value. - metadata[12] contains the
+ * vlan DE value. - metadata[15:13] contains the
+ * vlan PRI value. - metadata[31:16] contains
+ * the vlan TPID value.
+ */
#define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
- #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
- RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
+ #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
/*
- * This field indicates the IP type for the inner-most IP header. A
- * value of '0' indicates IPv4. A value of '1' indicates IPv6. This
- * value is only valid if itype indicates a packet with an IP header.
+ * This field indicates the IP type for the inner-most IP
+ * header. A value of '0' indicates IPv4. A value of '1'
+ * indicates IPv6. This value is only valid if itype indicates a
+ * packet with an IP header.
*/
- #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
- uint32_t flags2;
-
+ #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ uint32_t metadata;
/*
- * This is data from the CFA block as indicated by the meta_format
- * field.
+ * This is data from the CFA block as indicated by the
+ * meta_format field.
*/
/* When meta_format=1, this value is the VLAN VID. */
- #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
- #define RX_PKT_CMPL_METADATA_VID_SFT 0
+ #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_PKT_CMPL_METADATA_VID_SFT 0
/* When meta_format=1, this value is the VLAN DE. */
- #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
+ #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000)
/* When meta_format=1, this value is the VLAN PRI. */
- #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
- #define RX_PKT_CMPL_METADATA_PRI_SFT 13
+ #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_PKT_CMPL_METADATA_PRI_SFT 13
/* When meta_format=1, this value is the VLAN TPID. */
- #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
- #define RX_PKT_CMPL_METADATA_TPID_SFT 16
- uint32_t metadata;
-
+ #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_PKT_CMPL_METADATA_TPID_SFT 16
+ uint16_t errors_v2;
/*
- * This value is written by the NIC such that it will be different for
- * each pass through the completion queue. The even passes will write 1.
- * The odd passes will write 0.
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
*/
- #define RX_PKT_CMPL_V2 UINT32_C(0x1)
+ #define RX_PKT_CMPL_V2 UINT32_C(0x1)
/*
- * This error indicates that there was some sort of problem with the BDs
- * for the packet that was found after part of the packet was already
- * placed. The packet should be treated as invalid.
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet that was found after part of the
+ * packet was already placed. The packet should be treated as
+ * invalid.
*/
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
#define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1
- /* No buffer error */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \
- (UINT32_C(0x0) << 1)
- /*
- * Did Not Fit: Packet did not fit into packet buffer provided.
- * For regular placement, this means the packet did not fit in
- * the buffer provided. For HDS and jumbo placement, this means
- * that the packet could not be placed into 7 physical buffers
- * or less.
- */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \
- (UINT32_C(0x1) << 1)
- /*
- * Not On Chip: All BDs needed for the packet were not on-chip
- * when the packet arrived.
- */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
- (UINT32_C(0x2) << 1)
- /* Bad Format: BDs were not formatted correctly. */
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \
- (UINT32_C(0x3) << 1)
- #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
- RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
+ /* No buffer error */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (UINT32_C(0x0) << 1)
+ /*
+ * Did Not Fit: Packet did not fit into packet
+ * buffer provided. For regular placement, this
+ * means the packet did not fit in the buffer
+ * provided. For HDS and jumbo placement, this
+ * means that the packet could not be placed
+ * into 7 physical buffers or less.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (UINT32_C(0x1) << 1)
+ /*
+ * Not On Chip: All BDs needed for the packet
+ * were not on-chip when the packet arrived.
+ */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (UINT32_C(0x2) << 1)
+ /* Bad Format: BDs were not formatted correctly. */
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (UINT32_C(0x3) << 1)
+ #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT
/* This indicates that there was an error in the IP header checksum. */
- #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
+ #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10)
/*
- * This indicates that there was an error in the TCP, UDP or ICMP
- * checksum.
+ * This indicates that there was an error in the TCP, UDP or
+ * ICMP checksum.
*/
- #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+ #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20)
/*
- * This indicates that there was an error in the tunnel IP header
- * checksum.
+ * This indicates that there was an error in the tunnel IP
+ * header checksum.
*/
#define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
- /* This indicates that there was an error in the tunnel UDP checksum. */
+ /*
+ * This indicates that there was an error in the tunnel UDP
+ * checksum.
+ */
#define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
/*
- * This indicates that there was a CRC error on either an FCoE or RoCE
- * packet. The itype indicates the packet type.
+ * This indicates that there was a CRC error on either an FCoE
+ * or RoCE packet. The itype indicates the packet type.
*/
- #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
+ #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100)
/*
- * This indicates that there was an error in the tunnel portion of the
- * packet when this field is non-zero.
+ * This indicates that there was an error in the tunnel portion
+ * of the packet when this field is non-zero.
*/
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9
- /*
- * No additional error occurred on the tunnel portion of the
- * packet of the packet does not have a tunnel.
- */
+ /*
+ * No additional error occurred on the tunnel
+	 * portion of the packet or the packet does not
+ * have a tunnel.
+ */
#define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
- /*
- * Indicates that IP header version does not match expectation
- * from L2 Ethertype for IPv4 and IPv6 in the tunnel header.
- */
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \
- (UINT32_C(0x1) << 9)
- /*
- * Indicates that header length is out of range in the tunnel
- * header. Valid for IPv4.
- */
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \
- (UINT32_C(0x2) << 9)
- /*
- * Indicates that the physical packet is shorter than that
- * claimed by the PPPoE header length for a tunnel PPPoE packet.
- */
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \
- (UINT32_C(0x3) << 9)
- /*
- * Indicates that physical packet is shorter than that claimed
- * by the tunnel l3 header length. Valid for IPv4, or IPv6
- * tunnel packet packets.
- */
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \
- (UINT32_C(0x4) << 9)
- /*
- * Indicates that the physical packet is shorter than that
- * claimed by the tunnel UDP header length for a tunnel UDP
- * packet that is not fragmented.
- */
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \
- (UINT32_C(0x5) << 9)
- /*
- * indicates that the IPv4 TTL or IPv6 hop limit check have
- * failed (e.g. TTL = 0) in the tunnel header. Valid for IPv4,
- * and IPv6.
- */
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \
- (UINT32_C(0x6) << 9)
- #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
- RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
- /*
- * This indicates that there was an error in the inner portion of the
- * packet when this field is non-zero.
+ /*
+ * Indicates that IP header version does not
+ * match expectation from L2 Ethertype for IPv4
+ * and IPv6 in the tunnel header.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (UINT32_C(0x1) << 9)
+ /*
+ * Indicates that header length is out of range
+ * in the tunnel header. Valid for IPv4.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (UINT32_C(0x2) << 9)
+ /*
+ * Indicates that the physical packet is shorter
+ * than that claimed by the PPPoE header length
+ * for a tunnel PPPoE packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR (UINT32_C(0x3) << 9)
+ /*
+ * Indicates that physical packet is shorter
+ * than that claimed by the tunnel l3 header
+	 * length. Valid for IPv4 or IPv6 tunnel packets.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (UINT32_C(0x4) << 9)
+ /*
+ * Indicates that the physical packet is shorter
+ * than that claimed by the tunnel UDP header
+ * length for a tunnel UDP packet that is not
+ * fragmented.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (UINT32_C(0x5) << 9)
+ /*
+ * indicates that the IPv4 TTL or IPv6 hop limit
+	 * check has failed (e.g. TTL = 0) in the
+ * tunnel header. Valid for IPv4, and IPv6.
+ */
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x6) << 9)
+ #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL
+ /*
+ * This indicates that there was an error in the inner portion
+ * of the packet when this field is non-zero.
*/
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12
- /*
- * No additional error occurred on the tunnel portion of the
- * packet of the packet does not have a tunnel.
- */
+ /*
+ * No additional error occurred on the tunnel
+	 * portion of the packet or the packet does not
+ * have a tunnel.
+ */
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
- /*
- * Indicates that IP header version does not match expectation
- * from L2 Ethertype for IPv4 and IPv6 or that option other than
- * VFT was parsed on FCoE packet.
- */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \
- (UINT32_C(0x1) << 12)
- /*
- * indicates that header length is out of range. Valid for IPv4
- * and RoCE
- */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \
- (UINT32_C(0x2) << 12)
- /*
- * indicates that the IPv4 TTL or IPv6 hop limit check have
- * failed (e.g. TTL = 0). Valid for IPv4, and IPv6
- */
+ /*
+ * Indicates that IP header version does not
+ * match expectation from L2 Ethertype for IPv4
+ * and IPv6 or that option other than VFT was
+ * parsed on FCoE packet.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION (UINT32_C(0x1) << 12)
+ /*
+ * indicates that header length is out of range.
+ * Valid for IPv4 and RoCE
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (UINT32_C(0x2) << 12)
+ /*
+ * indicates that the IPv4 TTL or IPv6 hop limit
+	 * check has failed (e.g. TTL = 0). Valid for
+ * IPv4, and IPv6
+ */
#define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
- /*
- * Indicates that physical packet is shorter than that claimed
- * by the l3 header length. Valid for IPv4, IPv6 packet or RoCE
- * packets.
- */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \
- (UINT32_C(0x4) << 12)
- /*
- * Indicates that the physical packet is shorter than that
- * claimed by the UDP header length for a UDP packet that is not
- * fragmented.
- */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \
- (UINT32_C(0x5) << 12)
- /*
- * Indicates that TCP header length > IP payload. Valid for TCP
- * packets only.
- */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \
- (UINT32_C(0x6) << 12)
- /* Indicates that TCP header length < 5. Valid for TCP. */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
- (UINT32_C(0x7) << 12)
- /*
- * Indicates that TCP option headers result in a TCP header size
- * that does not match data offset in TCP header. Valid for TCP.
- */
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
- (UINT32_C(0x8) << 12)
- #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
- RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
- #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
- #define RX_PKT_CMPL_ERRORS_SFT 1
- uint16_t errors_v2;
-
/*
- * This field identifies the CFA action rule that was used for this
- * packet.
+ * Indicates that physical packet is shorter
+ * than that claimed by the l3 header length.
+ * Valid for IPv4, IPv6 packet or RoCE packets.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (UINT32_C(0x4) << 12)
+ /*
+ * Indicates that the physical packet is shorter
+ * than that claimed by the UDP header length
+ * for a UDP packet that is not fragmented.
*/
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (UINT32_C(0x5) << 12)
+ /*
+ * Indicates that TCP header length > IP
+ * payload. Valid for TCP packets only.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (UINT32_C(0x6) << 12)
+ /* Indicates that TCP header length < 5. Valid for TCP. */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \
+ (UINT32_C(0x7) << 12)
+ /*
+ * Indicates that TCP option headers result in a
+ * TCP header size that does not match data
+ * offset in TCP header. Valid for TCP.
+ */
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \
+ (UINT32_C(0x8) << 12)
+ #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \
+ RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_PKT_CMPL_ERRORS_SFT 1
uint16_t cfa_code;
-
/*
- * This value holds the reordering sequence number for the packet. If
- * the reordering sequence is not valid, then this value is zero. The
- * reordering domain for the packet is in the bottom 8 to 10b of the
- * rss_hash value. The bottom 20b of this value contain the ordering
- * domain value for the packet.
+ * This field identifies the CFA action rule that was used for
+ * this packet.
*/
- #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
- #define RX_PKT_CMPL_REORDER_SFT 0
uint32_t reorder;
+ /*
+ * This value holds the reordering sequence number for the
+ * packet. If the reordering sequence is not valid, then this
+ * value is zero. The reordering domain for the packet is in the
+ * bottom 8 to 10b of the rss_hash value. The bottom 20b of this
+ * value contain the ordering domain value for the packet.
+ */
+ #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff)
+ #define RX_PKT_CMPL_REORDER_SFT 0
} __attribute__((packed));
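/*
 * Illustrative sketch, assuming the rx_pkt_cmpl_hi layout and macros above:
 * extracting the VLAN tag from the metadata word when meta_format indicates
 * VLAN.  Byte-order conversion is omitted for brevity.
 */
static inline int
rx_pkt_cmpl_hi_vlan(const struct rx_pkt_cmpl_hi *rxh, uint16_t *vid,
		    uint16_t *pri, uint16_t *tpid)
{
	if ((rxh->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK) !=
	    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
		return 0;	/* metadata does not carry a VLAN tag */

	*vid  = (rxh->metadata & RX_PKT_CMPL_METADATA_VID_MASK) >>
		RX_PKT_CMPL_METADATA_VID_SFT;
	*pri  = (rxh->metadata & RX_PKT_CMPL_METADATA_PRI_MASK) >>
		RX_PKT_CMPL_METADATA_PRI_SFT;
	*tpid = (rxh->metadata & RX_PKT_CMPL_METADATA_TPID_MASK) >>
		RX_PKT_CMPL_METADATA_TPID_SFT;
	return 1;
}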
/* HWRM Forwarded Request (16 bytes) */
struct hwrm_fwd_req_cmpl {
+ uint16_t req_len_type;
/* Length of forwarded request in bytes. */
/*
- * This field indicates the exact type of the completion. By convention,
- * the LSB identifies the length of the record in 16B units. Even values
- * indicate 16B records. Odd values indicate 32B records.
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
*/
- #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0
- /* Forwarded HWRM Request */
- #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0)
+ #define HWRM_FWD_INPUT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_FWD_INPUT_CMPL_TYPE_SFT 0
+ /* Forwarded HWRM Request */
+ #define HWRM_FWD_INPUT_CMPL_TYPE_HWRM_FWD_INPUT UINT32_C(0x22)
/* Length of forwarded request in bytes. */
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
- #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
- uint16_t req_len_type;
-
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0)
+ #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6
+ uint16_t source_id;
/*
- * Source ID of this request. Typically used in forwarding requests and
- * responses. 0x0 - 0xFFF8 - Used for function ids 0xFFF8 - 0xFFFE -
- * Reserved for internal processors 0xFFFF - HWRM
+ * Source ID of this request. Typically used in forwarding
+ * requests and responses. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
+ * HWRM
*/
- uint16_t source_id;
-
uint32_t unused_0;
-
+ /* unused1 is 32 b */
+ uint32_t req_buf_addr_v[2];
/* Address of forwarded request. */
/*
- * This value is written by the NIC such that it will be different for
- * each pass through the completion queue. The even passes will write 1.
- * The odd passes will write 0.
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
*/
- #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1)
+ #define HWRM_FWD_INPUT_CMPL_V UINT32_C(0x1)
/* Address of forwarded request. */
#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe)
#define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
- uint64_t req_buf_addr_v;
} __attribute__((packed));
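/*
 * Illustrative sketch, assuming the hwrm_fwd_req_cmpl layout above and a
 * little-endian host: recovering the forwarded request length and the 64-bit
 * request buffer address spread across the two req_buf_addr_v words, with the
 * valid bit (HWRM_FWD_INPUT_CMPL_V) cleared out of bit 0.
 */
static inline uint16_t
hwrm_fwd_req_len(const struct hwrm_fwd_req_cmpl *fwd)
{
	return (fwd->req_len_type & HWRM_FWD_REQ_CMPL_REQ_LEN_MASK) >>
	       HWRM_FWD_REQ_CMPL_REQ_LEN_SFT;
}

static inline uint64_t
hwrm_fwd_req_addr(const struct hwrm_fwd_req_cmpl *fwd)
{
	uint64_t addr = ((uint64_t)fwd->req_buf_addr_v[1] << 32) |
			fwd->req_buf_addr_v[0];

	return addr & ~(uint64_t)HWRM_FWD_INPUT_CMPL_V;
}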
/* HWRM Asynchronous Event Completion Record (16 bytes) */
struct hwrm_async_event_cmpl {
- /*
- * This field indicates the exact type of the completion. By convention,
- * the LSB identifies the length of the record in 16B units. Even values
- * indicate 16B records. Odd values indicate 32B records.
- */
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
- /* HWRM Asynchronous Event Information */
- #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT \
- (UINT32_C(0x2e) << 0)
uint16_t type;
-
- /* Identifiers of events. */
- /* Link status changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
- (UINT32_C(0x0) << 0)
- /* Link MTU changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \
- (UINT32_C(0x1) << 0)
- /* Link speed changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \
- (UINT32_C(0x2) << 0)
- /* DCB Configuration changed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \
- (UINT32_C(0x3) << 0)
- /* Port connection not allowed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
- (UINT32_C(0x4) << 0)
- /* Link speed configuration was not allowed */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \
- (UINT32_C(0x5) << 0)
- /* Function driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \
- (UINT32_C(0x10) << 0)
- /* Function driver loaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \
- (UINT32_C(0x11) << 0)
- /* PF driver unloaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
- (UINT32_C(0x20) << 0)
- /* PF driver loaded */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \
- (UINT32_C(0x21) << 0)
- /* VF Function Level Reset (FLR) */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (UINT32_C(0x30) << 0)
- /* VF MAC Address Change */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \
- (UINT32_C(0x31) << 0)
- /* PF-VF communication channel status change. */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \
- (UINT32_C(0x32) << 0)
- /* HWRM Error */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
- (UINT32_C(0xff) << 0)
+ /* unused1 is 10 b */
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ /* unused1 is 10 b */
uint16_t event_id;
-
- /* Event specific data */
+ /* Identifiers of events. */
+ /* Link status changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE UINT32_C(0x0)
+ /* Link MTU changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE UINT32_C(0x1)
+ /* Link speed changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE UINT32_C(0x2)
+ /* DCB Configuration changed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE UINT32_C(0x3)
+ /* Port connection not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED UINT32_C(0x4)
+ /* Link speed configuration was not allowed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED UINT32_C(0x5)
+ /* Link speed configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE UINT32_C(0x6)
+ /* Port PHY configuration change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE UINT32_C(0x7)
+ /* Function driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD UINT32_C(0x10)
+ /* Function driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD UINT32_C(0x11)
+ /* Function FLR related processing has completed */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT UINT32_C(0x12)
+ /* PF driver unloaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD UINT32_C(0x20)
+ /* PF driver loaded */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD UINT32_C(0x21)
+ /* VF Function Level Reset (FLR) */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR UINT32_C(0x30)
+ /* VF MAC Address Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE UINT32_C(0x31)
+ /* PF-VF communication channel status change. */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE UINT32_C(0x32)
+ /* VF Configuration Change */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33)
+ /* HWRM Error */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff)
uint32_t event_data2;
-
+ /* Event specific data */
+ uint8_t opaque_v;
/* opaque is 7 b */
/*
- * This value is written by the NIC such that it will be different for
- * each pass through the completion queue. The even passes will write 1.
- * The odd passes will write 0.
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
*/
- #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1)
/* opaque is 7 b */
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
- #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
- uint8_t opaque_v;
-
- /* 8-lsb timestamp from POR (100-msec resolution) */
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1
uint8_t timestamp_lo;
-
- /* 16-lsb timestamp from POR (100-msec resolution) */
+ /* 8-lsb timestamp from POR (100-msec resolution) */
uint16_t timestamp_hi;
-
- /* Event specific data */
+ /* 16-lsb timestamp from POR (100-msec resolution) */
uint32_t event_data1;
+ /* Event specific data */
} __attribute__((packed));
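/*
 * Illustrative sketch, assuming the hwrm_async_event_cmpl layout and event
 * identifiers above: a hypothetical dispatch routine a driver's interrupt
 * handler might use.  The per-event actions are placeholders only.
 */
static void
example_handle_async_event(const struct hwrm_async_event_cmpl *ae)
{
	if ((ae->type & HWRM_ASYNC_EVENT_CMPL_TYPE_MASK) !=
	    HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
		return;

	switch (ae->event_id) {
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
		/* re-read link state; event_data1/event_data2 carry details */
		break;
	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* fatal firmware condition; typically logged and escalated */
		break;
	default:
		break;
	}
}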
/*
* Note: The Hardware Resource Manager (HWRM) manages various hardware resources
* inside the chip. The HWRM is implemented in firmware, and runs on embedded
- * processors inside the chip. This firmware is vital part of the chip's
- * hardware. The chip can not be used by driver without it.
+ * processors inside the chip. This firmware service is a vital part of the chip.
+ * The chip can not be used by a driver or HWRM client without the HWRM.
*/
/* Input (16 bytes) */
struct input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
} __attribute__((packed));
/* Output (8 bytes) */
struct output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+	 * Pass/Fail or error type. Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
} __attribute__((packed));
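/*
 * Illustrative sketch, assuming the input/output layouts above: filling the
 * common request header and polling the response valid byte.  The function
 * names and the "seq"/"resp_dma_addr" parameters are hypothetical; the host
 * response area must be 16B aligned and zeroed before the request is issued,
 * as required by the resp_addr description above.
 */
static inline void
example_hwrm_prep(struct input *req, uint16_t req_type, uint16_t seq,
		  uint64_t resp_dma_addr)
{
	req->req_type = req_type;
	req->cmpl_ring = (uint16_t)-1;	/* -1: no completion ring completion */
	req->seq_id = seq;
	req->target_id = 0xFFFF;	/* 0xFFFF targets the HWRM itself */
	req->resp_addr = resp_dma_addr;	/* 16B aligned, zeroed beforehand */
}

static inline int
example_hwrm_resp_ready(const struct output *resp)
{
	uint16_t len = resp->resp_len;

	/* The last byte of the response reads '1' once fully written. */
	if (len < sizeof(*resp))
		return 0;
	return ((const volatile uint8_t *)resp)[len - 1] == 1;
}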
-/* hwrm_cfa_l2_filter_alloc */
+/* hwrm_ver_get */
/*
- * A filter is used to identify traffic that contains a matching set of
- * parameters like unicast or broadcast MAC address or a VLAN tag amongst
- * other things which then allows the ASIC to direct the incoming traffic
- * to an appropriate VNIC or Rx ring.
+ * Description: This function is called by a driver to determine the HWRM
+ * interface version supported by the HWRM firmware, the version of HWRM
+ * firmware implementation, the name of HWRM firmware, the versions of other
+ * embedded firmwares, and the names of other embedded firmwares, etc. Any
+ * interface or firmware version with major = 0, minor = 0, and update = 0 shall
+ * be considered an invalid version.
*/
-
-/* Input (96 bytes) */
-struct hwrm_cfa_l2_filter_alloc_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/* Input (24 bytes) */
+struct hwrm_ver_get_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This value indicates the what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both TX and RX
- * paths of the chip.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \
- UINT32_C(0x1)
- /* tx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
- /* rx path */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
- HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
- /*
- * Setting of this flag indicates the applicability to the loopback
- * path.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \
- UINT32_C(0x2)
- /*
- * Setting of this flag indicates drop action. If this flag is not set,
- * then it should be considered accept action.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \
- UINT32_C(0x4)
- /*
- * If this flag is set, all t_l2_* fields are invalid and they should
- * not be specified. If this flag is set, then l2_* fields refer to
- * fields of outermost L2 header.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \
- UINT32_C(0x8)
- uint32_t flags;
-
- /* This bit must be '1' for the l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \
- UINT32_C(0x1)
- /* This bit must be '1' for the l2_addr_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \
- UINT32_C(0x2)
- /* This bit must be '1' for the l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \
- UINT32_C(0x4)
- /* This bit must be '1' for the l2_ovlan_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \
- UINT32_C(0x8)
- /* This bit must be '1' for the l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \
- UINT32_C(0x10)
- /* This bit must be '1' for the l2_ivlan_mask field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \
- UINT32_C(0x20)
- /* This bit must be '1' for the t_l2_addr field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \
- UINT32_C(0x40)
- /*
- * This bit must be '1' for the t_l2_addr_mask field to be configured.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \
- UINT32_C(0x80)
- /* This bit must be '1' for the t_l2_ovlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \
- UINT32_C(0x100)
- /*
- * This bit must be '1' for the t_l2_ovlan_mask field to be configured.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \
- UINT32_C(0x200)
- /* This bit must be '1' for the t_l2_ivlan field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \
- UINT32_C(0x400)
- /*
- * This bit must be '1' for the t_l2_ivlan_mask field to be configured.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \
- UINT32_C(0x800)
- /* This bit must be '1' for the src_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \
- UINT32_C(0x1000)
- /* This bit must be '1' for the src_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \
- UINT32_C(0x2000)
- /* This bit must be '1' for the tunnel_type field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \
- UINT32_C(0x4000)
- /* This bit must be '1' for the dst_id field to be configured. */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \
- UINT32_C(0x8000)
- /*
- * This bit must be '1' for the mirror_vnic_id field to be configured.
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
- UINT32_C(0x10000)
- uint32_t enables;
-
/*
- * This value sets the match value for the L2 MAC address. Destination
- * MAC address for RX path. Source MAC address for TX path.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- uint8_t l2_addr[6];
-
- uint8_t unused_0;
- uint8_t unused_1;
-
+ uint8_t hwrm_intf_maj;
/*
- * This value sets the mask value for the L2 address. A value of 0 will
- * mask the corresponding bit from compare.
+ * This field represents the major version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface major version is intended to change only when
+ * non backward compatible changes are made to the HWRM
+ * interface specification.
*/
- uint8_t l2_addr_mask[6];
-
- /* This value sets VLAN ID value for outer VLAN. */
- uint16_t l2_ovlan;
-
+ uint8_t hwrm_intf_min;
/*
- * This value sets the mask value for the ovlan id. A value of 0 will
- * mask the corresponding bit from compare.
+ * This field represents the minor version of HWRM interface
+ * specification supported by the driver HWRM implementation. A
+ * change in interface minor version is used to reflect
+ * significant backward compatible modification to HWRM
+ * interface specification. This can be due to addition or
+ * removal of functionality. HWRM interface specifications with
+ * the same major version but different minor versions are
+ * compatible.
*/
- uint16_t l2_ovlan_mask;
-
- /* This value sets VLAN ID value for inner VLAN. */
- uint16_t l2_ivlan;
-
+ uint8_t hwrm_intf_upd;
/*
- * This value sets the mask value for the ivlan id. A value of 0 will
- * mask the corresponding bit from compare.
+ * This field represents the update version of HWRM interface
+ * specification supported by the driver HWRM implementation.
+ * The interface update version is used to reflect minor changes
+ * or bug fixes to a released HWRM interface specification.
*/
- uint16_t l2_ivlan_mask;
-
- uint8_t unused_2;
- uint8_t unused_3;
+ uint8_t unused_0[5];
+} __attribute__((packed));
+/* Output (128 bytes) */
+struct hwrm_ver_get_output {
+ uint16_t error_code;
/*
- * This value sets the match value for the tunnel L2 MAC address.
- * Destination MAC address for RX path. Source MAC address for TX path.
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint8_t t_l2_addr[6];
-
- uint8_t unused_4;
- uint8_t unused_5;
-
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This value sets the mask value for the tunnel L2 address. A value of
- * 0 will mask the corresponding bit from compare.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint8_t t_l2_addr_mask[6];
-
- /* This value sets VLAN ID value for tunnel outer VLAN. */
- uint16_t t_l2_ovlan;
-
+ uint8_t hwrm_intf_maj;
/*
- * This value sets the mask value for the tunnel ovlan id. A value of 0
- * will mask the corresponding bit from compare.
+ * This field represents the major version of HWRM interface
+ * specification supported by the HWRM implementation. The
+ * interface major version is intended to change only when non
+ * backward compatible changes are made to the HWRM interface
+ * specification. A HWRM implementation that is compliant with
+ * this specification shall provide value of 1 in this field.
*/
- uint16_t t_l2_ovlan_mask;
-
- /* This value sets VLAN ID value for tunnel inner VLAN. */
- uint16_t t_l2_ivlan;
-
+ uint8_t hwrm_intf_min;
/*
- * This value sets the mask value for the tunnel ivlan id. A value of 0
- * will mask the corresponding bit from compare.
+ * This field represents the minor version of HWRM interface
+ * specification supported by the HWRM implementation. A change
+ * in interface minor version is used to reflect significant
+ * backward compatible modification to HWRM interface
+ * specification. This can be due to addition or removal of
+ * functionality. HWRM interface specifications with the same
+ * major version but different minor versions are compatible. A
+ * HWRM implementation that is compliant with this specification
+ * shall provide value of 2 in this field.
*/
- uint16_t t_l2_ivlan_mask;
-
- /* This value identifies the type of source of the packet. */
- /* Network port */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT \
- (UINT32_C(0x0) << 0)
- /* Physical function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF \
- (UINT32_C(0x1) << 0)
- /* Virtual function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF \
- (UINT32_C(0x2) << 0)
- /* Virtual NIC of a function */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC \
- (UINT32_C(0x3) << 0)
- /* Embedded processor for CFA management */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG \
- (UINT32_C(0x4) << 0)
- /* Embedded processor for OOB management */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE \
- (UINT32_C(0x5) << 0)
- /* Embedded processor for RoCE */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO \
- (UINT32_C(0x6) << 0)
- /* Embedded processor for network proxy functions */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG \
- (UINT32_C(0x7) << 0)
- uint8_t src_type;
-
- uint8_t unused_6;
+ uint8_t hwrm_intf_upd;
/*
- * This value is the id of the source. For a network port, it represents
- * port_id. For a physical function, it represents fid. For a virtual
- * function, it represents vf_id. For a vnic, it represents vnic_id. For
- * embedded processors, this id is not valid. Notes: 1. The function ID
- * is implied if it src_id is not provided for a src_type that is either
+ * This field represents the update version of HWRM interface
+ * specification supported by the HWRM implementation. The
+ * interface update version is used to reflect minor changes or
+ * bug fixes to a released HWRM interface specification. A HWRM
+ * implementation that is compliant with this specification
+ * shall provide value of 2 in this field.
*/
- uint32_t src_id;
-
- /* Tunnel Type. */
- /* Non-tunnel */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
- (UINT32_C(0x0) << 0)
- /* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \
- (UINT32_C(0x1) << 0)
- /*
- * Network Virtualization Generic Routing Encapsulation (NVGRE)
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \
- (UINT32_C(0x2) << 0)
- /*
- * Generic Routing Encapsulation (GRE) inside Ethernet payload
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \
- (UINT32_C(0x3) << 0)
- /* IP in IP */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \
- (UINT32_C(0x4) << 0)
- /* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
- (UINT32_C(0x5) << 0)
- /* Multi-Protocol Lable Switching (MPLS) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \
- (UINT32_C(0x6) << 0)
- /* Stateless Transport Tunnel (STT) */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \
- (UINT32_C(0x7) << 0)
- /*
- * Generic Routing Encapsulation (GRE) inside IP datagram
- * payload
- */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \
- (UINT32_C(0x8) << 0)
- /* Any tunneled traffic */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
- (UINT32_C(0xff) << 0)
- uint8_t tunnel_type;
-
- uint8_t unused_7;
-
+ uint8_t hwrm_intf_rsvd;
+ uint8_t hwrm_fw_maj;
/*
- * If set, this value shall represent the Logical VNIC ID of the
- * destination VNIC for the RX path and network port id of the
- * destination port for the TX path.
+ * This field represents the major version of HWRM firmware. A
+ * change in firmware major version represents a major firmware
+ * release.
*/
- uint16_t dst_id;
-
- /* Logical VNIC ID of the VNIC where traffic is mirrored. */
- uint16_t mirror_vnic_id;
-
- /*
- * This hint is provided to help in placing the filter in the filter
- * table.
- */
- /* No preference */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \
- (UINT32_C(0x0) << 0)
- /* Above the given filter */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \
- (UINT32_C(0x1) << 0)
- /* Below the given filter */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \
- (UINT32_C(0x2) << 0)
- /* As high as possible */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \
- (UINT32_C(0x3) << 0)
- /* As low as possible */
- #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \
- (UINT32_C(0x4) << 0)
- uint8_t pri_hint;
-
- uint8_t unused_8;
- uint32_t unused_9;
-
+ uint8_t hwrm_fw_min;
/*
- * This is the ID of the filter that goes along with the pri_hint. This
- * field is valid only for the following values. 1 - Above the given
- * filter 2 - Below the given filter
+ * This field represents the minor version of HWRM firmware. A
+ * change in firmware minor version represents significant
+ * firmware functionality changes.
*/
- uint64_t l2_filter_id_hint;
-} __attribute__((packed));
-
-/* Output (24 bytes) */
-struct hwrm_cfa_l2_filter_alloc_output {
+ uint8_t hwrm_fw_bld;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This field represents the build version of HWRM firmware. A
+ * change in firmware build version represents bug fixes to a
+ * released firmware.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
+ uint8_t hwrm_fw_rsvd;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version of the HWRM
+ * firmware.
*/
- uint16_t resp_len;
-
+ uint8_t mgmt_fw_maj;
/*
- * This value identifies a set of CFA data structures used for an L2
- * context.
+ * This field represents the major version of mgmt firmware. A
+ * change in major version represents a major release.
*/
- uint64_t l2_filter_id;
-
+ uint8_t mgmt_fw_min;
/*
- * This is the ID of the flow associated with this filter. This value
- * shall be used to match and associate the flow identifier returned in
- * completion records. A value of 0xFFFFFFFF shall indicate no flow id.
+ * This field represents the minor version of mgmt firmware. A
+ * change in minor version represents significant functionality
+ * changes.
*/
- uint32_t flow_id;
-
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
-
+ uint8_t mgmt_fw_bld;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field represents the build version of mgmt firmware. A
+ * change in update version represents bug fixes.
*/
- uint8_t valid;
-} __attribute__((packed));
-
-/* hwrm_cfa_l2_filter_free */
-/*
- * Description: Free a L2 filter. The HWRM shall free all associated filter
- * resources with the L2 filter.
- */
-
-/* Input (24 bytes) */
-struct hwrm_cfa_l2_filter_free_input {
+ uint8_t mgmt_fw_rsvd;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
*/
- uint16_t req_type;
-
+ uint8_t netctrl_fw_maj;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This field represents the major version of network control
+ * firmware. A change in major version represents a major
+ * release.
*/
- uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
+ uint8_t netctrl_fw_min;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This field represents the minor version of network control
+ * firmware. A change in minor version represents significant
+ * functionality changes.
*/
- uint16_t target_id;
-
+ uint8_t netctrl_fw_bld;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * This field represents the build version of network control
+ * firmware. A change in update version represents bug fixes.
*/
- uint64_t resp_addr;
-
+ uint8_t netctrl_fw_rsvd;
/*
- * This value identifies a set of CFA data structures used for an L2
- * context.
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
*/
- uint64_t l2_filter_id;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_cfa_l2_filter_free_output {
+ uint32_t dev_caps_cfg;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This field is used to indicate device's capabilities and
+ * configurations.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * If set to 1, then secure firmware update behavior is
+ * supported. If set to 0, then secure firmware update behavior
+ * is not supported.
*/
- uint16_t resp_len;
-
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
-
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED UINT32_C(0x1)
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * If set to 1, then firmware based DCBX agent is supported. If
+ * set to 0, then firmware based DCBX agent capability is not
+ * supported on this device.
*/
- uint8_t valid;
-} __attribute__((packed));
-
-/* hwrm_cfa_l2_set_rx_mask */
-/* Description: This command will set rx mask of the function. */
-
-/* Input (40 bytes) */
-struct hwrm_cfa_l2_set_rx_mask_input {
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED UINT32_C(0x2)
+ uint8_t roce_fw_maj;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * This field represents the major version of RoCE firmware. A
+ * change in major version represents a major release.
*/
- uint16_t req_type;
-
+ uint8_t roce_fw_min;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This field represents the minor version of RoCE firmware. A
+ * change in minor version represents significant functionality
+ * changes.
*/
- uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
+ uint8_t roce_fw_bld;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This field represents the build version of RoCE firmware. A
+ * change in update version represents bug fixes.
*/
- uint16_t target_id;
-
+ uint8_t roce_fw_rsvd;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * This field is a reserved field. This field can be used to
+ * represent firmware branches or customer specific releases
+ * tied to a specific (major,minor,update) version
*/
- uint64_t resp_addr;
-
- /* VNIC ID */
- uint32_t vnic_id;
-
- /* Reserved for future use. */
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1)
+ char hwrm_fw_name[16];
/*
- * When this bit is '1', the function is requested to accept multi-cast
- * packets specified by the multicast addr table.
+ * This field represents the name of HWRM FW (ASCII chars with
+ * NULL at the end).
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2)
+ char mgmt_fw_name[16];
/*
- * When this bit is '1', the function is requested to accept all multi-
- * cast packets.
+ * This field represents the name of mgmt FW (ASCII chars with
+ * NULL at the end).
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4)
+ char netctrl_fw_name[16];
/*
- * When this bit is '1', the function is requested to accept broadcast
- * packets.
+ * This field represents the name of network control firmware
+ * (ASCII chars with NULL at the end).
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8)
+ uint32_t reserved2[4];
/*
- * When this bit is '1', the function is requested to be put in the
- * promiscuous mode. The HWRM should accept any function to set up
- * promiscuous mode. The HWRM shall follow the semantics below for the
- * promiscuous mode support. # When partitioning is not enabled on a
- * port (i.e. single PF on the port), then the PF shall be allowed to be
- * in the promiscuous mode. When the PF is in the promiscuous mode, then
- * it shall receive all host bound traffic on that port. # When
- * partitioning is enabled on a port (i.e. multiple PFs per port) and a
- * PF on that port is in the promiscuous mode, then the PF receives all
- * traffic within that partition as identified by a unique identifier
- * for the PF (e.g. S-Tag). If a unique outer VLAN for the PF is
- * specified, then the setting of promiscuous mode on that PF shall
- * result in the PF receiving all host bound traffic with matching outer
- * VLAN. # A VF shall can be set in the promiscuous mode. In the
- * promiscuous mode, the VF does not receive any traffic unless a unique
- * outer VLAN for the VF is specified. If a unique outer VLAN for the VF
- * is specified, then the setting of promiscuous mode on that VF shall
- * result in the VF receiving all host bound traffic with the matching
- * outer VLAN. # The HWRM shall allow the setting of promiscuous mode on
- * a function independently from the promiscuous mode settings on other
- * functions.
+ * This field is reserved for future use. The responder should
+ * set it to 0. The requester should ignore this field.
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
+ char roce_fw_name[16];
/*
- * If this flag is set, the corresponding RX filters shall be set up to
- * cover multicast/broadcast filters for the outermost Layer 2
- * destination MAC address field.
+ * This field represents the name of RoCE FW (ASCII chars with
+ * NULL at the end).
*/
- #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20)
- uint32_t mask;
-
- /* This is the address for mcast address tbl. */
- uint64_t mc_tbl_addr;
-
+ uint16_t chip_num;
+ /* This field returns the chip number. */
+ uint8_t chip_rev;
+ /* This field returns the revision of chip. */
+ uint8_t chip_metal;
+ /* This field returns the chip metal number. */
+ uint8_t chip_bond_id;
+ /* This field returns the bond id of the chip. */
+ uint8_t chip_platform_type;
/*
- * This value indicates how many entries in mc_tbl are valid. Each entry
- * is 6 bytes.
+ * This value indicates the type of platform used for chip
+ * implementation.
*/
- uint32_t num_mc_entries;
-
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_cfa_l2_set_rx_mask_output {
+ /* ASIC */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
+ /* FPGA platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA UINT32_C(0x1)
+ /* Palladium platform of the chip. */
+ #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM UINT32_C(0x2)
+ uint16_t max_req_win_len;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This field returns the maximum value of request window that
+ * is supported by the HWRM. The request window is mapped into
+ * device address space using MMIO.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
+ uint16_t max_resp_len;
+ /* This field returns the maximum value of response buffer in bytes. */
+ uint16_t def_req_timeout;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field returns the default request timeout value in
+ * milliseconds.
*/
- uint16_t resp_len;
-
- uint32_t unused_0;
+ uint8_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
- uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
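
The version output above is normally the first response a driver parses after bringing up the command channel. Below is a minimal, illustrative sketch (not taken from the driver sources) of how such a response might be consumed once it has landed in host memory; the example_ helper name is an assumption and endianness conversion is deliberately omitted:

static inline int example_check_ver_get(const struct hwrm_ver_get_output *resp)
{
	/* Illustrative only; assumes the buffer is already in host byte order. */
	/* The last byte of the response doubles as the completion flag. */
	if (resp->valid != 1)
		return -1;
	/* This specification mandates interface major version 1. */
	if (resp->hwrm_intf_maj != 1)
		return -1;
	/* dev_caps_cfg advertises optional behaviors as individual bits. */
	return !!(resp->dev_caps_cfg &
		  HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED);
}
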
-/* hwrm_exec_fwd_resp */
+/* hwrm_func_reset */
/*
- * Description: This command is used to send an encapsulated request to the
- * HWRM. This command instructs the HWRM to execute the request and forward the
- * response of the encapsulated request to the location specified in the
- * original request that is encapsulated. The target id of this command shall be
- * set to 0xFFFF (HWRM). The response location in this command shall be used to
- * acknowledge the receipt of the encapsulated request and forwarding of the
- * response.
+ * Description: This command resets a hardware function (PCIe function) and
+ * frees any resources used by the function. This command shall be initiated by
+ * the driver after an FLR has occurred to prepare the function for re-use. This
+ * command may also be initiated by a driver prior to doing its own
+ * configuration. This command puts the function into the reset state. In the
+ * reset state, global and port related features of the chip are not available.
*/
-
-/* Input (128 bytes) */
-struct hwrm_exec_fwd_resp_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/*
+ * Note: This command will reset a function that has already been disabled or
+ * idled. The command returns all the resources owned by the function so a new
+ * driver may allocate and configure resources normally.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_reset_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
/*
- * This is an encapsulated request. This request should be executed by
- * the HWRM and the response should be provided in the response buffer
- * inside the encapsulated request.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- uint32_t encap_request[26];
-
+ uint32_t enables;
+ /* This bit must be '1' for the vf_id_valid field to be configured. */
+ #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID UINT32_C(0x1)
+ uint16_t vf_id;
/*
- * This value indicates the target id of the response to the
- * encapsulated request. 0x0 - 0xFFF8 - Used for function ids 0xFFF8 -
- * 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * The ID of the VF that this PF is trying to reset. Only the
+ * parent PF shall be allowed to reset a child VF. A parent PF
+ * driver shall use this field only when a specific child VF is
+ * requested to be reset.
*/
- uint16_t encap_resp_target_id;
-
- uint16_t unused_0[3];
+ uint8_t func_reset_level;
+ /* This value indicates the level of a function reset. */
+ /*
+ * Reset the caller function and its children
+ * VFs (if any). If no children functions exist,
+ * then reset the caller function only.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL UINT32_C(0x0)
+ /* Reset the caller function only */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME UINT32_C(0x1)
+ /*
+ * Reset all children VFs of the caller function
+ * driver if the caller is a PF driver. It is an
+ * error to specify this level by a VF driver.
+ * It is an error to specify this level by a PF
+ * driver with no children VFs.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN UINT32_C(0x2)
+ /*
+ * Reset a specific VF of the caller function
+ * driver if the caller is the parent PF driver.
+ * It is an error to specify this level by a VF
+ * driver. It is an error to specify this level
+ * by a PF driver that is not the parent of the
+ * VF that is being requested to reset.
+ */
+ #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF UINT32_C(0x3)
+ uint8_t unused_0;
} __attribute__((packed));
/* Output (16 bytes) */
-struct hwrm_exec_fwd_resp_output {
+struct hwrm_func_reset_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
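
Issuing the reset amounts to choosing one of the reset levels above and, for RESETVF, identifying the child VF. A hedged sketch of filling the request body follows; req_type, seq_id and resp_addr are assumed to be populated by the driver's usual HWRM send path, and byte ordering is ignored here:

static inline void
example_build_func_reset_vf(struct hwrm_func_reset_input *req, uint16_t vf_id)
{
	/* Illustrative only; a real driver converts fields to little endian. */
	req->func_reset_level = HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF;
	/* vf_id is only honoured when the VF_ID_VALID enable bit is set. */
	req->enables = HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID;
	req->vf_id = vf_id;
}
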
/* hwrm_func_qcaps */
@@ -1978,351 +1734,761 @@ struct hwrm_exec_fwd_resp_output {
* physical function. The output FID value is needed to configure Rings and
* MSI-X vectors so their DMA operations appear correctly on the PCI bus.
*/
-
/* Input (24 bytes) */
struct hwrm_func_qcaps_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
/*
- * Function ID of the function that is being queried. 0xFF... (All Fs)
- * if the query is for the requesting function.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
uint16_t fid;
-
+ /*
+ * Function ID of the function that is being queried. 0xFF...
+ * (All Fs) if the query is for the requesting function.
+ */
uint16_t unused_0[3];
} __attribute__((packed));
/* Output (80 bytes) */
struct hwrm_func_qcaps_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
+ /* This field provides original sequence number of the command. */
uint16_t resp_len;
-
/*
- * FID value. This value is used to identify operations on the PCI bus
- * as belonging to a particular PCI function.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
uint16_t fid;
-
/*
- * Port ID of port that this function is associated with. Valid only for
- * the PF. 0xFF... (All Fs) if this function is not associated with any
- * port. 0xFF... (All Fs) if this function is called from a VF.
+ * FID value. This value is used to identify operations on the
+ * PCI bus as belonging to a particular PCI function.
*/
uint16_t port_id;
-
+ /*
+ * Port ID of port that this function is associated with. Valid
+ * only for the PF. 0xFF... (All Fs) if this function is not
+ * associated with any port. 0xFF... (All Fs) if this function
+ * is called from a VF.
+ */
+ uint32_t flags;
/* If 1, then Push mode is supported on this function. */
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1)
/*
- * If 1, then the global MSI-X auto-masking is enabled for the device.
+ * If 1, then the global MSI-X auto-masking is enabled for the
+ * device.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \
- UINT32_C(0x2)
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING UINT32_C(0x2)
/*
- * If 1, then the Precision Time Protocol (PTP) processing is supported
- * on this function. The HWRM should enable PTP on only a single
- * Physical Function (PF) per port.
+ * If 1, then the Precision Time Protocol (PTP) processing is
+ * supported on this function. The HWRM should enable PTP on
+ * only a single Physical Function (PF) per port.
*/
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
- uint32_t flags;
-
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4)
/*
- * This value is current MAC address configured for this function. A
- * value of 00-00-00-00-00-00 indicates no MAC address is currently
- * configured.
+ * If 1, then RDMA over Converged Ethernet (RoCE) v1 is
+ * supported on this function.
*/
- uint8_t perm_mac_address[6];
-
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED UINT32_C(0x8)
/*
- * The maximum number of RSS/COS contexts that can be allocated to the
- * function.
+ * If 1, then RDMA over Converged Ethernet (RoCE) v2 is
+ * supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED UINT32_C(0x10)
+ /*
+ * If 1, then control and configuration of WoL magic packet are
+ * supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED UINT32_C(0x20)
+ /*
+ * If 1, then control and configuration of bitmap pattern packet
+ * are supported on this function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_BMP_SUPPORTED UINT32_C(0x40)
+ /*
+ * If set to 1, then the control and configuration of rate limit
+ * of an allocated TX ring on the queried function is supported.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_RING_RL_SUPPORTED UINT32_C(0x80)
+ /*
+ * If 1, then control and configuration of minimum and maximum
+ * bandwidths are supported on the queried function.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_TX_BW_CFG_SUPPORTED UINT32_C(0x100)
+ /*
+ * If the query is for a VF, then this flag shall be ignored. If
+ * this query is for a PF and this flag is set to 1, then the PF
+ * has the capability to set the rate limits on the TX rings of
+ * its children VFs. If this query is for a PF and this flag is
+ * set to 0, then the PF does not have the capability to set the
+ * rate limits on the TX rings of its children VFs.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_TX_RING_RL_SUPPORTED UINT32_C(0x200)
+ /*
+ * If the query is for a VF, then this flag shall be ignored. If
+ * this query is for a PF and this flag is set to 1, then the PF
+ * has the capability to set the minimum and/or maximum
+ * bandwidths for its children VFs. If this query is for a PF
+ * and this flag is set to 0, then the PF does not have the
+ * capability to set the minimum or maximum bandwidths for its
+ * children VFs.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VF_BW_CFG_SUPPORTED UINT32_C(0x400)
+ uint8_t mac_address[6];
+ /*
+ * This value is current MAC address configured for this
+ * function. A value of 00-00-00-00-00-00 indicates no MAC
+ * address is currently configured.
*/
uint16_t max_rsscos_ctx;
-
/*
- * The maximum number of completion rings that can be allocated to the
- * function.
+ * The maximum number of RSS/COS contexts that can be allocated
+ * to the function.
*/
uint16_t max_cmpl_rings;
-
/*
- * The maximum number of transmit rings that can be allocated to the
- * function.
+ * The maximum number of completion rings that can be allocated
+ * to the function.
*/
uint16_t max_tx_rings;
-
/*
- * The maximum number of receive rings that can be allocated to the
- * function.
+ * The maximum number of transmit rings that can be allocated to
+ * the function.
*/
uint16_t max_rx_rings;
-
/*
- * The maximum number of L2 contexts that can be allocated to the
- * function.
+ * The maximum number of receive rings that can be allocated to
+ * the function.
*/
uint16_t max_l2_ctxs;
-
- /* The maximum number of VNICs that can be allocated to the function. */
+ /*
+ * The maximum number of L2 contexts that can be allocated to
+ * the function.
+ */
uint16_t max_vnics;
-
/*
- * The identifier for the first VF enabled on a PF. This is valid only
- * on the PF with SR-IOV enabled. 0xFF... (All Fs) if this command is
- * called on a PF with SR-IOV disabled or on a VF.
+ * The maximum number of VNICs that can be allocated to the
+ * function.
*/
uint16_t first_vf_id;
-
/*
- * The maximum number of VFs that can be allocated to the function. This
- * is valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if this
- * command is called on a PF with SR-IOV disabled or on a VF.
+ * The identifier for the first VF enabled on a PF. This is
+ * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
+ * this command is called on a PF with SR-IOV disabled or on a
+ * VF.
*/
uint16_t max_vfs;
-
/*
- * The maximum number of statistic contexts that can be allocated to the
- * function.
+ * The maximum number of VFs that can be allocated to the
+ * function. This is valid only on the PF with SR-IOV enabled.
+ * 0xFF... (All Fs) if this command is called on a PF with SR-
+ * IOV disabled or on a VF.
*/
uint16_t max_stat_ctx;
-
/*
- * The maximum number of Encapsulation records that can be offloaded by
- * this function.
+ * The maximum number of statistic contexts that can be
+ * allocated to the function.
*/
uint32_t max_encap_records;
-
/*
- * The maximum number of decapsulation records that can be offloaded by
- * this function.
+ * The maximum number of Encapsulation records that can be
+ * offloaded by this function.
*/
uint32_t max_decap_records;
-
/*
- * The maximum number of Exact Match (EM) flows that can be offloaded by
- * this function on the TX side.
+ * The maximum number of decapsulation records that can be
+ * offloaded by this function.
*/
uint32_t max_tx_em_flows;
-
/*
- * The maximum number of Wildcard Match (WM) flows that can be offloaded
- * by this function on the TX side.
+ * The maximum number of Exact Match (EM) flows that can be
+ * offloaded by this function on the TX side.
*/
uint32_t max_tx_wm_flows;
-
/*
- * The maximum number of Exact Match (EM) flows that can be offloaded by
- * this function on the RX side.
+ * The maximum number of Wildcard Match (WM) flows that can be
+ * offloaded by this function on the TX side.
*/
uint32_t max_rx_em_flows;
-
/*
- * The maximum number of Wildcard Match (WM) flows that can be offloaded
- * by this function on the RX side.
+ * The maximum number of Exact Match (EM) flows that can be
+ * offloaded by this function on the RX side.
*/
uint32_t max_rx_wm_flows;
-
/*
- * The maximum number of multicast filters that can be supported by this
- * function on the RX side.
+ * The maximum number of Wildcard Match (WM) flows that can be
+ * offloaded by this function on the RX side.
*/
uint32_t max_mcast_filters;
-
/*
- * The maximum value of flow_id that can be supported in completion
- * records.
+ * The maximum number of multicast filters that can be supported
+ * by this function on the RX side.
*/
uint32_t max_flow_id;
-
/*
- * The maximum number of HW ring groups that can be supported on this
- * function.
+ * The maximum value of flow_id that can be supported in
+ * completion records.
*/
uint32_t max_hw_ring_grps;
-
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
-
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * The maximum number of HW ring groups that can be supported on
+ * this function.
+ */
+ uint16_t max_sp_tx_rings;
+ /*
+ * The maximum number of strict priority transmit rings that can
+ * be allocated to the function. This number indicates the
+ * maximum number of TX rings that can be assigned strict
+ * priorities out of the maximum number of TX rings that can be
+ * allocated (max_tx_rings) to the function.
*/
+ uint8_t unused_0;
uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
} __attribute__((packed));
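
A driver typically queries its own capabilities (fid of all ones in the input above) once at initialization and then sizes its rings from the maxima reported here. A small sketch of reading the output, under the same assumptions as the earlier examples:

static inline uint16_t
example_qcaps_usable_tx_rings(const struct hwrm_func_qcaps_output *resp,
			      int *ptp_supported)
{
	/* Illustrative only; assumes the response is complete (valid == 1). */
	*ptp_supported =
		!!(resp->flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED);
	/* The reported maxima bound how many resources may later be allocated. */
	return resp->max_tx_rings;
}
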
-/* hwrm_func_reset */
-/*
- * Description: This command resets a hardware function (PCIe function) and
- * frees any resources used by the function. This command shall be initiated by
- * the driver after an FLR has occurred to prepare the function for re-use. This
- * command may also be initiated by a driver prior to doing it's own
- * configuration. This command puts the function into the reset state. In the
- * reset state, global and port related features of the chip are not available.
- */
+/* hwrm_func_qcfg */
/*
- * Note: This command will reset a function that has already been disabled or
- * idled. The command returns all the resources owned by the function so a new
- * driver may allocate and configure resources normally.
+ * Description: This command returns the current configuration of a function.
+ * The input FID value is used to indicate what function is being queried. This
+ * allows a physical function driver to query virtual functions that are
+ * children of the physical function. The output FID value is needed to
+ * configure Rings and MSI-X vectors so their DMA operations appear correctly on
+ * the PCI bus.
*/
-
/* Input (24 bytes) */
-struct hwrm_func_reset_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+struct hwrm_func_qcfg_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
+ /*
+	 * This value indicates what completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
/* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t fid;
+ /*
+ * Function ID of the function that is being queried. 0xFF...
+ * (All Fs) if the query is for the requesting function.
+ */
+ uint16_t unused_0[3];
+} __attribute__((packed));
+
+/* Output (72 bytes) */
+struct hwrm_func_qcfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t fid;
+ /*
+ * FID value. This value is used to identify operations on the
+ * PCI bus as belonging to a particular PCI function.
+ */
+ uint16_t port_id;
+ /*
+ * Port ID of port that this function is associated with.
+ * 0xFF... (All Fs) if this function is not associated with any
+ * port.
+ */
+ uint16_t vlan;
+ /*
+ * This value is the current VLAN setting for this function. The
+ * value of 0 for this field indicates no priority tagging or
+ * VLAN is used. This field's format is same as 802.1Q Tag's Tag
+ * Control Information (TCI) format that includes both Priority
+ * Code Point (PCP) and VLAN Identifier (VID).
+ */
+ uint16_t flags;
+ /*
+ * If 1, then magic packet based Out-Of-Box WoL is enabled on
+ * the port associated with this function.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_MAGICPKT_ENABLED \
+ UINT32_C(0x1)
+ /*
+ * If 1, then bitmap pattern based Out-Of-Box WoL packet is
+ * enabled on the port associated with this function.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_OOB_WOL_BMP_ENABLED UINT32_C(0x2)
+ /*
+ * If set to 1, then FW based DCBX agent is enabled and running
+ * on the port associated with this function. If set to 0, then
+ * DCBX agent is not running in the firmware.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED \
+ UINT32_C(0x4)
+ uint8_t mac_address[6];
+ /*
+ * This value is current MAC address configured for this
+ * function. A value of 00-00-00-00-00-00 indicates no MAC
+ * address is currently configured.
+ */
+ uint16_t pci_id;
+ /*
+ * This value is current PCI ID of this function. If ARI is
+ * enabled, then it is Bus Number (8b):Function Number(8b).
+ * Otherwise, it is Bus Number (8b):Device Number (4b):Function
+ * Number(4b).
+ */
+ uint16_t alloc_rsscos_ctx;
+ /*
+ * The number of RSS/COS contexts currently allocated to the
+ * function.
+ */
+ uint16_t alloc_cmpl_rings;
+ /*
+ * The number of completion rings currently allocated to the
+ * function. This does not include the rings allocated to any
+ * children functions if any.
+ */
+ uint16_t alloc_tx_rings;
+ /*
+ * The number of transmit rings currently allocated to the
+ * function. This does not include the rings allocated to any
+ * children functions if any.
+ */
+ uint16_t alloc_rx_rings;
+ /*
+ * The number of receive rings currently allocated to the
+ * function. This does not include the rings allocated to any
+ * children functions if any.
+ */
+ uint16_t alloc_l2_ctx;
+ /* The allocated number of L2 contexts to the function. */
+ uint16_t alloc_vnics;
+ /* The allocated number of vnics to the function. */
+ uint16_t mtu;
+ /*
+ * The maximum transmission unit of the function. For rings
+ * allocated on this function, this default value is used if
+ * ring MTU is not specified.
+ */
+ uint16_t mru;
+ /*
+ * The maximum receive unit of the function. For vnics allocated
+ * on this function, this default value is used if vnic MRU is
+ * not specified.
+ */
+ uint16_t stat_ctx_id;
+ /* The statistics context assigned to a function. */
+ uint8_t port_partition_type;
+ /*
+ * The HWRM shall return Unknown value for this field when this
+ * command is used to query VF's configuration.
+ */
+ /* Single physical function */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_SPF UINT32_C(0x0)
+ /* Multiple physical functions */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_MPFS UINT32_C(0x1)
+ /* Network Partitioning 1.0 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0 \
+ UINT32_C(0x2)
+ /* Network Partitioning 1.5 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5 \
+ UINT32_C(0x3)
+ /* Network Partitioning 2.0 */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0 \
+ UINT32_C(0x4)
+ /* Unknown */
+ #define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t unused_0;
+ uint16_t dflt_vnic_id;
+ /* The default VNIC ID assigned to a function that is being queried. */
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint32_t min_bw;
+ /*
+ * Minimum BW allocated for this function. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for the scheduler inside the device. A value of 0 indicates
+ * the minimum bandwidth is not configured.
+ */
+ /* Bandwidth value */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_SFT 0
+ /* Reserved */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_RSVD UINT32_C(0x10000000)
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mbps */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_MBPS \
+ (UINT32_C(0x0) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_LAST \
+		HWRM_FUNC_QCFG_OUTPUT_MIN_BW_BW_VALUE_UNIT_INVALID
+ uint32_t max_bw;
+ /*
+ * Maximum BW allocated for this function. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for the scheduler inside the device. A value of 0 indicates
+ * that the maximum bandwidth is not configured.
+ */
+ /* Bandwidth value */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT 0
+ /* Reserved */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_RSVD UINT32_C(0x10000000)
+ /* bw_value_unit is 3 b */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mbps */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MBPS \
+ (UINT32_C(0x0) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+		HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_INVALID
+ uint8_t evb_mode;
+ /*
+ * This value indicates the Edge virtual bridge mode for the
+ * domain that this function belongs to.
+ */
+ /* No Edge Virtual Bridging (EVB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_NO_EVB UINT32_C(0x0)
+ /* Virtual Ethernet Bridge (VEB) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1)
+ /* Virtual Ethernet Port Aggregator (VEPA) */
+ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2)
+ uint8_t unused_3;
+ uint16_t alloc_vfs;
+ /*
+ * The number of VFs that are allocated to the function. This is
+ * valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if
+ * this command is called on a PF with SR-IOV disabled or on a
+ * VF.
+ */
+ uint32_t alloc_mcast_filters;
+ /*
+ * The number of allocated multicast filters for this function
+ * on the RX side.
+ */
+ uint32_t alloc_hw_ring_grps;
+ /* The number of allocated HW ring groups for this function. */
+ uint16_t alloc_sp_tx_rings;
+ /*
+ * The number of strict priority transmit rings out of currently
+ * allocated TX rings to the function (alloc_tx_rings).
+ */
+ uint8_t unused_4;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
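
min_bw and max_bw pack a 28-bit value and a 3-bit unit into a single word, so reading them back is a matter of masking and shifting with the definitions above. The following sketch handles the Mbps unit only; converting the 1/100-percent unit would additionally require the port bandwidth, which this structure does not carry:

static inline uint32_t
example_qcfg_max_bw_mbps(const struct hwrm_func_qcfg_output *resp)
{
	/* Illustrative only; assumes host byte order. */
	uint32_t unit = resp->max_bw &
			HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MASK;

	if (unit != HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_UNIT_MBPS)
		return 0;	/* percent-based or invalid unit: not decoded here */
	/* A result of 0 also means no maximum bandwidth has been configured. */
	return (resp->max_bw & HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_MASK) >>
	       HWRM_FUNC_QCFG_OUTPUT_MAX_BW_BW_VALUE_SFT;
}
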
+/* hwrm_func_drv_rgtr */
+/*
+ * Description: This command is used by the function driver to register its
+ * information with the HWRM. A function driver shall implement this command. A
+ * function driver shall use this command during the driver initialization right
+ * after the HWRM version discovery and default ring resources allocation.
+ */
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request will
+	 * optionally be completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* This bit must be '1' for the vf_id_valid field to be configured. */
- #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID \
- UINT32_C(0x1)
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function driver is requesting all
+ * requests from its children VF drivers to be forwarded to
+ * itself. This flag can only be set by the PF driver. If a VF
+ * driver sets this flag, it should be ignored by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is requesting none of the
+ * requests from its children VF drivers to be forwarded to
+ * itself. This flag can only be set by the PF driver. If a VF
+ * driver sets this flag, it should be ignored by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
uint32_t enables;
-
+ /* This bit must be '1' for the os_type field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1)
+ /* This bit must be '1' for the ver field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2)
+ /* This bit must be '1' for the timestamp field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4)
+ /* This bit must be '1' for the vf_req_fwd field to be configured. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD UINT32_C(0x8)
/*
- * The ID of the VF that this PF is trying to reset. Only the parent PF
- * shall be allowed to reset a child VF. A parent PF driver shall use
- * this field only when a specific child VF is requested to be reset.
+ * This bit must be '1' for the async_event_fwd field to be
+ * configured.
*/
- uint16_t vf_id;
-
- /* This value indicates the level of a function reset. */
- /*
- * Reset the caller function and its children VFs (if any). If
- * no children functions exist, then reset the caller function
- * only.
- */
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \
- (UINT32_C(0x0) << 0)
- /* Reset the caller function only */
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \
- (UINT32_C(0x1) << 0)
- /*
- * Reset all children VFs of the caller function driver if the
- * caller is a PF driver. It is an error to specify this level
- * by a VF driver. It is an error to specify this level by a PF
- * driver with no children VFs.
- */
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \
- (UINT32_C(0x2) << 0)
- /*
- * Reset a specific VF of the caller function driver if the
- * caller is the parent PF driver. It is an error to specify
- * this level by a VF driver. It is an error to specify this
- * level by a PF driver that is not the parent of the VF that is
- * being requested to reset.
- */
- #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \
- (UINT32_C(0x3) << 0)
- uint8_t func_reset_level;
-
+ #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
+ uint16_t os_type;
+ /* This value indicates the type of OS. */
+ /* Unknown */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Other OS not listed below. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER UINT32_C(0x1)
+ /* MSDOS OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS UINT32_C(0xe)
+ /* Windows OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS UINT32_C(0x12)
+ /* Solaris OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS UINT32_C(0x1d)
+ /* Linux OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX UINT32_C(0x24)
+ /* FreeBSD OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD UINT32_C(0x2a)
+ /* VMware ESXi OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI UINT32_C(0x68)
+ /* Microsoft Windows 8 64-bit OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 UINT32_C(0x73)
+ /* Microsoft Windows Server 2012 R2 OS. */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 UINT32_C(0x74)
+ uint8_t ver_maj;
+ /* This is the major version of the driver. */
+ uint8_t ver_min;
+ /* This is the minor version of the driver. */
+ uint8_t ver_upd;
+ /* This is the update version of the driver. */
uint8_t unused_0;
+ uint16_t unused_1;
+ uint32_t timestamp;
+ /*
+ * This is a 32-bit timestamp provided by the driver for keep
+ * alive. The timestamp is in multiples of 1ms.
+ */
+ uint32_t unused_2;
+ uint32_t vf_req_fwd[8];
+ /*
+ * This is a 256-bit bit mask provided by the PF driver for
+ * letting the HWRM know what commands issued by the VF driver
+ * to the HWRM should be forwarded to the PF driver. Nth bit
+ * refers to the Nth req_type. Setting Nth bit to 1 indicates
+ * that requests from the VF driver with req_type equal to N
+ * shall be forwarded to the parent PF driver. This field is not
+ * valid for the VF driver.
+ */
+ uint32_t async_event_fwd[8];
+ /*
+ * This is a 256-bit bit mask provided by the function driver
+ * (PF or VF driver) to indicate the list of asynchronous event
+ * completions to be forwarded. Nth bit refers to the Nth
+ * event_id. Setting Nth bit to 1 by the function driver shall
+ * result in the HWRM forwarding asynchronous event completion
+ * with event_id equal to N. If all bits are set to 0 (value of
+ * 0), then the HWRM shall not forward any asynchronous event
+ * completion to this function driver.
+ */
} __attribute__((packed));
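
The two forwarding masks above are laid out as eight 32-bit words covering 256 bits. A minimal sketch of how a driver might set bit N (the req_type or event_id to forward) in such a mask, assuming only the layout described above; the helper name is hypothetical and byte-order conversion of the words is left to the caller:

static inline void example_fwd_mask_set(uint32_t *mask, unsigned int n)
{
	/* Word n/32 holds bit n%32 of the 256-bit mask (n = 0..255). */
	mask[n / 32] |= (uint32_t)1 << (n % 32);
}
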
/* Output (16 bytes) */
-struct hwrm_func_reset_output {
+struct hwrm_func_drv_rgtr_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
/* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
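
As a rough illustration of the registration flow described above, a PF driver could populate the request as sketched below. The command id macro, the bnxt device handle and the send helper are assumptions for the example, not definitions introduced by this header; rte_cpu_to_le_16/32 come from rte_byteorder.h.

/* Illustrative sketch only; not part of the generated interface. */
static int example_func_drv_rgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_rgtr_input req = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_FUNC_DRV_RGTR);	/* assumed cmd id */
	req.enables = rte_cpu_to_le_32(
		HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
		HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER);
	req.os_type = rte_cpu_to_le_16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX);
	req.ver_maj = 16;	/* driver version advertised to the HWRM */
	req.ver_min = 11;
	req.ver_upd = 0;

	/* example_hwrm_send() stands in for the driver's HWRM transport. */
	return example_hwrm_send(bp, &req, sizeof(req));
}
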
+
+/* hwrm_func_drv_unrgtr */
+/*
+ * Description: This command is used by the function driver to unregister with
+ * the HWRM. A function driver shall implement this command. A function driver
+ * shall use this command during the driver unloading.
+ */
+/* Input (24 bytes) */
+struct hwrm_func_drv_unrgtr_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the function driver is notifying the
+ * HWRM to prepare for the shutdown.
+ */
+ #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN UINT32_C(0x1)
+ uint32_t unused_0;
+} __attribute__((packed));
+/* Output (16 bytes) */
+struct hwrm_func_drv_unrgtr_output {
+ uint16_t error_code;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
uint16_t resp_len;
-
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
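
For the unregister path the same pattern applies; a minimal sketch using the flag defined above, with the same hypothetical helpers as in the registration example:

static int example_func_drv_unrgtr(struct bnxt *bp)
{
	struct hwrm_func_drv_unrgtr_input req = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_FUNC_DRV_UNRGTR);	/* assumed cmd id */
	/* Tell the HWRM this unload is part of a host shutdown. */
	req.flags = rte_cpu_to_le_32(
		HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN);

	return example_hwrm_send(bp, &req, sizeof(req));
}
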
/* hwrm_port_phy_cfg */
@@ -2334,118 +2500,168 @@ struct hwrm_func_reset_output {
* configure PHY using this command. In a network partition mode, a PF driver
* shall not be allowed to configure PHY using this command.
*/
-
/* Input (56 bytes) */
struct hwrm_port_phy_cfg_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
/*
- * When this bit is set to '1', the PHY for the port shall be reset. #
- * If this bit is set to 1, then the HWRM shall reset the PHY after
- * applying PHY configuration changes specified in this command. # In
- * order to guarantee that PHY configuration changes specified in this
- * command take effect, the HWRM client should set this flag to 1. # If
- * this bit is not set to 1, then the HWRM may reset the PHY depending
- * on the current PHY configuration and settings specified in this
- * command.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /*
- * When this bit is set to '1', the link shall be forced to be taken
- * down. # When this bit is set to '1", all other command input settings
- * related to the link speed shall be ignored. Once the link state is
- * forced down, it can be explicitly cleared from that state by setting
- * this flag to '0'. # If this flag is set to '0', then the link shall
- * be cleared from forced down state if the link is in forced down
- * state. There may be conditions (e.g. out-of-band or sideband
- * configuration changes for the link) outside the scope of the HWRM
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is set to '1', the PHY for the port shall be
+ * reset. # If this bit is set to 1, then the HWRM shall reset
+ * the PHY after applying PHY configuration changes specified in
+ * this command. # In order to guarantee that PHY configuration
+ * changes specified in this command take effect, the HWRM
+ * client should set this flag to 1. # If this bit is not set to
+ * 1, then the HWRM may reset the PHY depending on the current
+ * PHY configuration and settings specified in this command.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the link shall be forced to be
+	 * taken down. # When this bit is set to '1', all other command
+ * input settings related to the link speed shall be ignored.
+ * Once the link state is forced down, it can be explicitly
+ * cleared from that state by setting this flag to '0'. # If
+ * this flag is set to '0', then the link shall be cleared from
+ * forced down state if the link is in forced down state. There
+ * may be conditions (e.g. out-of-band or sideband configuration
+ * changes for the link) outside the scope of the HWRM
* implementation that may clear forced down link state.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2)
/*
* When this bit is set to '1', the link shall be forced to the
- * force_link_speed value. When this bit is set to '1', the HWRM client
- * should not enable any of the auto negotiation related fields
- * represented by auto_XXX fields in this command. When this bit is set
- * to '1' and the HWRM client has enabled a auto_XXX field in this
- * command, then the HWRM shall ignore the enabled auto_XXX field. When
- * this bit is set to zero, the link shall be allowed to autoneg.
+ * force_link_speed value. When this bit is set to '1', the HWRM
+ * client should not enable any of the auto negotiation related
+ * fields represented by auto_XXX fields in this command. When
+	 * this bit is set to '1' and the HWRM client has enabled an
+ * auto_XXX field in this command, then the HWRM shall ignore
+ * the enabled auto_XXX field. When this bit is set to zero, the
+ * link shall be allowed to autoneg.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4)
/*
- * When this bit is set to '1', the auto-negotiation process shall be
- * restarted on the link.
+ * When this bit is set to '1', the auto-negotiation process
+ * shall be restarted on the link.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE) is
- * requested to be enabled on this link. If EEE is not supported on this
- * port, then this flag shall be ignored by the HWRM.
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE)
+ * is requested to be enabled on this link. If EEE is not
+ * supported on this port, then this flag shall be ignored by
+ * the HWRM.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10)
/*
- * When this bit is set to '1', Energy Efficient Ethernet (EEE) is
- * requested to be disabled on this link. If EEE is not supported on
- * this port, then this flag shall be ignored by the HWRM.
+ * When this bit is set to '1', Energy Efficient Ethernet (EEE)
+ * is requested to be disabled on this link. If EEE is not
+ * supported on this port, then this flag shall be ignored by
+ * the HWRM.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20)
/*
- * When this bit is set to '1' and EEE is enabled on this link, then TX
- * LPI is requested to be enabled on the link. If EEE is not supported
- * on this port, then this flag shall be ignored by the HWRM. If EEE is
- * disabled on this port, then this flag shall be ignored by the HWRM.
+ * When this bit is set to '1' and EEE is enabled on this link,
+ * then TX LPI is requested to be enabled on the link. If EEE is
+ * not supported on this port, then this flag shall be ignored
+ * by the HWRM. If EEE is disabled on this port, then this flag
+ * shall be ignored by the HWRM.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI UINT32_C(0x40)
- uint32_t flags;
-
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_ENABLE UINT32_C(0x40)
+ /*
+ * When this bit is set to '1' and EEE is enabled on this link,
+ * then TX LPI is requested to be disabled on the link. If EEE
+ * is not supported on this port, then this flag shall be
+ * ignored by the HWRM. If EEE is disabled on this port, then
+ * this flag shall be ignored by the HWRM.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
+ /*
+ * When set to 1, then the HWRM shall enable FEC
+	 * autonegotiation on this port if supported. When set to 0,
+ * then this flag shall be ignored. If FEC autonegotiation is
+ * not supported, then the HWRM shall ignore this flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
+ /*
+ * When set to 1, then the HWRM shall disable FEC
+ * autonegotiation on this port if supported. When set to 0,
+ * then this flag shall be ignored. If FEC autonegotiation is
+ * not supported, then the HWRM shall ignore this flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE UINT32_C(0x200)
+ /*
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
+ * Code) on this port if supported. When set to 0, then this
+ * flag shall be ignored. If FEC CLAUSE 74 is not supported,
+ * then the HWRM shall ignore this flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE UINT32_C(0x400)
+ /*
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 74
+ * (Fire Code) on this port if supported. When set to 0, then
+ * this flag shall be ignored. If FEC CLAUSE 74 is not
+ * supported, then the HWRM shall ignore this flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_DISABLE UINT32_C(0x800)
+ /*
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 91 (Reed
+ * Solomon) on this port if supported. When set to 0, then this
+ * flag shall be ignored. If FEC CLAUSE 91 is not supported,
+ * then the HWRM shall ignore this flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_ENABLE UINT32_C(0x1000)
+ /*
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 91
+ * (Reed Solomon) on this port if supported. When set to 0, then
+ * this flag shall be ignored. If FEC CLAUSE 91 is not
+ * supported, then the HWRM shall ignore this flag.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE UINT32_C(0x2000)
+ uint32_t enables;
/* This bit must be '1' for the auto_mode field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1)
/* This bit must be '1' for the auto_duplex field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2)
/* This bit must be '1' for the auto_pause field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4)
/*
- * This bit must be '1' for the auto_link_speed field to be configured.
+ * This bit must be '1' for the auto_link_speed field to be
+ * configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8)
/*
* This bit must be '1' for the auto_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK UINT32_C(0x10)
/* This bit must be '1' for the wirespeed field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIOUTPUTEED UINT32_C(0x20)
/* This bit must be '1' for the lpbk field to be configured. */
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40)
/* This bit must be '1' for the preemphasis field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80)
/* This bit must be '1' for the force_pause field to be configured. */
@@ -2454,604 +2670,493 @@ struct hwrm_port_phy_cfg_input {
* This bit must be '1' for the eee_link_speed_mask field to be
* configured.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \
- UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK UINT32_C(0x200)
/* This bit must be '1' for the tx_lpi_timer field to be configured. */
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400)
- uint32_t enables;
-
- /* Port ID of port that is to be configured. */
uint16_t port_id;
-
- /*
- * This is the speed that will be used if the force bit is '1'. If
- * unsupported speed is selected, an error will be generated.
- */
- /* 100Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB \
- (UINT32_C(0x1) << 0)
- /* 1Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB \
- (UINT32_C(0xa) << 0)
- /* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB \
- (UINT32_C(0x14) << 0)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB \
- (UINT32_C(0x19) << 0)
- /* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB \
- (UINT32_C(0x64) << 0)
- /* 20Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB \
- (UINT32_C(0xc8) << 0)
- /* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB \
- (UINT32_C(0xfa) << 0)
- /* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB \
- (UINT32_C(0x190) << 0)
- /* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB \
- (UINT32_C(0x1f4) << 0)
- /* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB \
- (UINT32_C(0x3e8) << 0)
- /* 10Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB \
- (UINT32_C(0xffff) << 0)
+ /* Port ID of port that is to be configured. */
uint16_t force_link_speed;
-
/*
- * This value is used to identify what autoneg mode is used when the
- * link speed is not being forced.
- */
- /*
- * Disable autoneg or autoneg disabled. No speeds are selected.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE (UINT32_C(0x0) << 0)
- /* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS \
- (UINT32_C(0x1) << 0)
- /*
- * Select only the auto_link_speed speed for autoneg mode. This
- * mode has been DEPRECATED. An HWRM client should not use this
- * mode.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED \
- (UINT32_C(0x2) << 0)
- /*
- * Select the auto_link_speed or any speed below that speed for
- * autoneg. This mode has been DEPRECATED. An HWRM client should
- * not use this mode.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW \
- (UINT32_C(0x3) << 0)
- /*
- * Select the speeds based on the corresponding link speed mask
- * value that is provided.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK \
- (UINT32_C(0x4) << 0)
+ * This is the speed that will be used if the force bit is '1'.
+	 * If an unsupported speed is selected, an error will be generated.
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
uint8_t auto_mode;
-
/*
- * This is the duplex setting that will be used if the autoneg_mode is
- * "one_speed" or "one_or_below".
- */
- /* Half Duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF \
- (UINT32_C(0x0) << 0)
- /* Full duplex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL \
- (UINT32_C(0x1) << 0)
- /* Both Half and Full dupex will be requested. */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH \
- (UINT32_C(0x2) << 0)
- uint8_t auto_duplex;
-
+ * This value is used to identify what autoneg mode is used when
+ * the link speed is not being forced.
+ */
/*
- * This value is used to configure the pause that will be used for
- * autonegotiation. Add text on the usage of auto_pause and force_pause.
+ * Disable autoneg or autoneg disabled. No
+ * speeds are selected.
*/
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * When this bit is '1', Generation of tx pause messages has been
- * requested. Disabled otherwise.
+ * Select only the auto_link_speed speed for
+ * autoneg mode. This mode has been DEPRECATED.
+ * An HWRM client should not use this mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * When this bit is '1', Reception of rx pause messages has been
- * requested. Disabled otherwise.
+ * Select the auto_link_speed or any speed below
+ * that speed for autoneg. This mode has been
+ * DEPRECATED. An HWRM client should not use
+ * this mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * When set to 1, the advertisement of pause is enabled. # When the
- * auto_mode is not set to none and this flag is set to 1, then the
- * auto_pause bits on this port are being advertised and autoneg pause
- * results are being interpreted. # When the auto_mode is not set to
- * none and this flag is set to 0, the pause is forced as indicated in
- * force_pause, and also advertised as auto_pause bits, but the autoneg
- * results are not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is set to
- * 1, auto_pause bits should be ignored and should be set to 0.
+ * Select the speeds based on the corresponding
+ * link speed mask value that is provided.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ uint8_t auto_duplex;
+ /*
+ * This is the duplex setting that will be used if the
+ * autoneg_mode is "one_speed" or "one_or_below".
+ */
+ /* Half Duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF UINT32_C(0x0)
+ /* Full duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL UINT32_C(0x1)
+	/* Both Half and Full duplex will be requested. */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH UINT32_C(0x2)
uint8_t auto_pause;
-
+ /*
+ * This value is used to configure the pause that will be used
+ * for autonegotiation. Add text on the usage of auto_pause and
+ * force_pause.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages has
+ * been requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages has been
+ * requested. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2)
+ /*
+ * When set to 1, the advertisement of pause is enabled. # When
+ * the auto_mode is not set to none and this flag is set to 1,
+ * then the auto_pause bits on this port are being advertised
+ * and autoneg pause results are being interpreted. # When the
+ * auto_mode is not set to none and this flag is set to 0, the
+ * pause is forced as indicated in force_pause, and also
+ * advertised as auto_pause bits, but the autoneg results are
+ * not interpreted since the pause configuration is being
+ * forced. # When the auto_mode is set to none and this flag is
+ * set to 1, auto_pause bits should be ignored and should be set
+ * to 0.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
uint8_t unused_0;
-
+ uint16_t auto_link_speed;
/*
* This is the speed that will be used if the autoneg_mode is
- * "one_speed" or "one_or_below". If an unsupported speed is selected,
- * an error will be generated.
- */
- /* 100Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB \
- (UINT32_C(0x1) << 0)
- /* 1Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB \
- (UINT32_C(0xa) << 0)
- /* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB \
- (UINT32_C(0x14) << 0)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB \
- (UINT32_C(0x19) << 0)
- /* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB \
- (UINT32_C(0x64) << 0)
- /* 20Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB \
- (UINT32_C(0xc8) << 0)
- /* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB \
- (UINT32_C(0xfa) << 0)
- /* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB \
- (UINT32_C(0x190) << 0)
- /* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB \
- (UINT32_C(0x1f4) << 0)
- /* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB \
- (UINT32_C(0x3e8) << 0)
- /* 10Mb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB \
- (UINT32_C(0xffff) << 0)
- uint16_t auto_link_speed;
-
+ * "one_speed" or "one_or_below". If an unsupported speed is
+ * selected, an error will be generated.
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ uint16_t auto_link_speed_mask;
/*
- * This is a mask of link speeds that will be used if autoneg_mode is
- * "mask". If unsupported speed is enabled an error will be generated.
+ * This is a mask of link speeds that will be used if
+ * autoneg_mode is "mask". If unsupported speed is enabled an
+ * error will be generated.
*/
/* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB UINT32_C(0x2)
/* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \
- UINT32_C(0x80)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \
- UINT32_C(0x100)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \
- UINT32_C(0x200)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \
- UINT32_C(0x400)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \
- UINT32_C(0x800)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB UINT32_C(0x800)
/* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \
- UINT32_C(0x1000)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD UINT32_C(0x1000)
/* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \
- UINT32_C(0x2000)
- uint16_t auto_link_speed_mask;
-
- /* This value controls the wirespeed feature. */
- /* Wirespeed feature is disabled. */
- #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0)
- /* Wirespeed feature is enabled. */
- #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON (UINT32_C(0x1) << 0)
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB UINT32_C(0x2000)
uint8_t wirespeed;
-
- /* This value controls the loopback setting for the PHY. */
- /* No loopback is selected. Normal operation. */
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE (UINT32_C(0x0) << 0)
- /*
- * The HW will be configured with local loopback such that host
- * data is sent back to the host without modification.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL (UINT32_C(0x1) << 0)
- /*
- * The HW will be configured with remote loopback such that port
- * logic will send packets back out the transmitter that are
- * received.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE (UINT32_C(0x2) << 0)
+ /* This value controls the wirespeed feature. */
+ /* Wirespeed feature is disabled. */
+ #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_OFF UINT32_C(0x0)
+ /* Wirespeed feature is enabled. */
+ #define HWRM_PORT_PHY_CFG_INPUT_WIOUTPUTEED_ON UINT32_C(0x1)
uint8_t lpbk;
-
+ /* This value controls the loopback setting for the PHY. */
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
/*
- * This value is used to configure the pause that will be used for force
- * mode.
+ * The HW will be configured with local loopback
+ * such that host data is sent back to the host
+ * without modification.
*/
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL UINT32_C(0x1)
/*
- * When this bit is '1', Generation of tx pause messages is supported.
- * Disabled otherwise.
+ * The HW will be configured with remote
+ * loopback such that port logic will send
+ * packets back out the transmitter that are
+ * received.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE UINT32_C(0x2)
+ uint8_t force_pause;
/*
- * When this bit is '1', Reception of rx pause messages is supported.
- * Disabled otherwise.
+ * This value is used to configure the pause that will be used
+ * for force mode.
*/
- #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
- uint8_t force_pause;
-
- uint8_t unused_1;
-
/*
- * This value controls the pre-emphasis to be used for the link. Driver
- * should not set this value (use enable.preemphasis = 0) unless driver
- * is sure of setting. Normally HWRM FW will determine proper pre-
- * emphasis.
+ * When this bit is '1', Generation of tx pause messages is
+ * supported. Disabled otherwise.
*/
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages is
+ * supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2)
+ uint8_t unused_1;
uint32_t preemphasis;
-
/*
- * Setting for link speed mask that is used to advertise speeds during
- * autonegotiation when EEE is enabled. This field is valid only when
- * EEE is enabled. The speeds specified in this field shall be a subset
- * of speeds specified in auto_link_speed_mask. If EEE is enabled,then
- * at least one speed shall be provided in this mask.
+ * This value controls the pre-emphasis to be used for the link.
+ * Driver should not set this value (use enable.preemphasis = 0)
+ * unless driver is sure of setting. Normally HWRM FW will
+ * determine proper pre-emphasis.
+ */
+ uint16_t eee_link_speed_mask;
+ /*
+ * Setting for link speed mask that is used to advertise speeds
+ * during autonegotiation when EEE is enabled. This field is
+ * valid only when EEE is enabled. The speeds specified in this
+ * field shall be a subset of speeds specified in
+	 * auto_link_speed_mask. If EEE is enabled, then at least one
+ * speed shall be provided in this mask.
*/
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
- uint16_t eee_link_speed_mask;
-
+ #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
uint8_t unused_2;
uint8_t unused_3;
-
- /*
- * Reuested setting of TX LPI timer in microseconds. This field is valid
- * only when EEE is enabled and TX LPI is enabled.
- */
- #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK \
- UINT32_C(0xffffff)
- #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
uint32_t tx_lpi_timer;
-
uint32_t unused_4;
+ /*
+	 * Requested setting of TX LPI timer in microseconds. This field
+ * is valid only when EEE is enabled and TX LPI is enabled.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0
} __attribute__((packed));
/* Output (16 bytes) */
struct hwrm_port_phy_cfg_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
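
A hedged sketch of how the flags pattern above might be used to force the port to 10Gb, using only constants defined in this structure; the command id macro, device handle and send helper are again hypothetical placeholders:

static int example_port_phy_force_10g(struct bnxt *bp, uint16_t port_id)
{
	struct hwrm_port_phy_cfg_input req = { 0 };

	req.req_type = rte_cpu_to_le_16(HWRM_PORT_PHY_CFG);	/* assumed cmd id */
	/* Force the speed and reset the PHY so the change takes effect. */
	req.flags = rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
				     HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
	req.port_id = rte_cpu_to_le_16(port_id);
	req.force_link_speed =
		rte_cpu_to_le_16(HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB);
	/* No auto_* fields are enabled, so 'enables' stays zero. */

	return example_hwrm_send(bp, &req, sizeof(req));
}
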
/* hwrm_port_phy_qcfg */
/* Description: This command queries the PHY configuration for the port. */
/* Input (24 bytes) */
-
struct hwrm_port_phy_qcfg_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* Port ID of port that is to be queried. */
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
uint16_t port_id;
-
+ /* Port ID of port that is to be queried. */
uint16_t unused_0[3];
} __attribute__((packed));
/* Output (96 bytes) */
struct hwrm_port_phy_qcfg_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
- /* This value indicates the current link status. */
- /* There is no link or cable detected. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK (UINT32_C(0x0) << 0)
- /* There is no link, but a cable has been detected. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL (UINT32_C(0x1) << 0)
- /* There is a link. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK (UINT32_C(0x2) << 0)
uint8_t link;
-
+ /* This value indicates the current link status. */
+ /* There is no link or cable detected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK UINT32_C(0x0)
+ /* There is no link, but a cable has been detected. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL UINT32_C(0x1)
+ /* There is a link. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK UINT32_C(0x2)
uint8_t unused_0;
-
- /* This value indicates the current link speed of the connection. */
- /* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB \
- (UINT32_C(0x1) << 0)
- /* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB \
- (UINT32_C(0xa) << 0)
- /* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB \
- (UINT32_C(0x14) << 0)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB \
- (UINT32_C(0x19) << 0)
- /* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB \
- (UINT32_C(0x64) << 0)
- /* 20Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB \
- (UINT32_C(0xc8) << 0)
- /* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB \
- (UINT32_C(0xfa) << 0)
- /* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB \
- (UINT32_C(0x190) << 0)
- /* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB \
- (UINT32_C(0x1f4) << 0)
- /* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB \
- (UINT32_C(0x3e8) << 0)
- /* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB \
- (UINT32_C(0xffff) << 0)
uint16_t link_speed;
-
- /* This value is indicates the duplex of the current connection. */
- /* Half Duplex connection. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF (UINT32_C(0x0) << 0)
- /* Full duplex connection. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL (UINT32_C(0x1) << 0)
+ /* This value indicates the current link speed of the connection. */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
uint8_t duplex;
-
+	/* This value indicates the duplex of the current connection. */
+ /* Half Duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF UINT32_C(0x0)
+ /* Full duplex connection. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL UINT32_C(0x1)
+ uint8_t pause;
/*
- * This value is used to indicate the current pause configuration. When
- * autoneg is enabled, this value represents the autoneg results of
- * pause configuration.
+ * This value is used to indicate the current pause
+ * configuration. When autoneg is enabled, this value represents
+ * the autoneg results of pause configuration.
*/
/*
- * When this bit is '1', Generation of tx pause messages is supported.
- * Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages is
+ * supported. Disabled otherwise.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is supported.
- * Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages is
+ * supported. Disabled otherwise.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2)
- uint8_t pause;
-
+ uint16_t support_speeds;
/*
- * The supported speeds for the port. This is a bit mask. For each speed
- * that is supported, the corrresponding bit will be set to '1'.
+ * The supported speeds for the port. This is a bit mask. For
+	 * each speed that is supported, the corresponding bit will be
+ * set to '1'.
*/
/* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \
+ UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB UINT32_C(0x2)
/* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \
- UINT32_C(0x8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \
- UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \
- UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \
- UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \
- UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \
- UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \
- UINT32_C(0x800)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB UINT32_C(0x800)
/* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \
- UINT32_C(0x1000)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD UINT32_C(0x1000)
/* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \
- UINT32_C(0x2000)
- uint16_t support_speeds;
-
- /*
- * Current setting of forced link speed. When the link speed is not
- * being forced, this value shall be set to 0.
- */
- /* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \
- (UINT32_C(0x1) << 0)
- /* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB \
- (UINT32_C(0xa) << 0)
- /* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB \
- (UINT32_C(0x14) << 0)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \
- (UINT32_C(0x19) << 0)
- /* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB \
- (UINT32_C(0x64) << 0)
- /* 20Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB \
- (UINT32_C(0xc8) << 0)
- /* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB \
- (UINT32_C(0xfa) << 0)
- /* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \
- (UINT32_C(0x190) << 0)
- /* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \
- (UINT32_C(0x1f4) << 0)
- /* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
- (UINT32_C(0x3e8) << 0)
- /* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \
- (UINT32_C(0xffff) << 0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB UINT32_C(0x2000)
uint16_t force_link_speed;
-
- /* Current setting of auto negotiation mode. */
- /*
- * Disable autoneg or autoneg disabled. No speeds are selected.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE \
- (UINT32_C(0x0) << 0)
- /* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS \
- (UINT32_C(0x1) << 0)
- /*
- * Select only the auto_link_speed speed for autoneg mode. This
- * mode has been DEPRECATED. An HWRM client should not use this
- * mode.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED \
- (UINT32_C(0x2) << 0)
- /*
- * Select the auto_link_speed or any speed below that speed for
- * autoneg. This mode has been DEPRECATED. An HWRM client should
- * not use this mode.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \
- (UINT32_C(0x3) << 0)
- /*
- * Select the speeds based on the corresponding link speed mask
- * value that is provided.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK \
- (UINT32_C(0x4) << 0)
+ /*
+ * Current setting of forced link speed. When the link speed is
+ * not being forced, this value shall be set to 0.
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \
+ UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \
+ UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \
+ UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB UINT32_C(0xffff)
uint8_t auto_mode;
-
+ /* Current setting of auto negotiation mode. */
/*
- * Current setting of pause autonegotiation. Move autoneg_pause flag
- * here.
+ * Disable autoneg or autoneg disabled. No
+ * speeds are selected.
*/
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * When this bit is '1', Generation of tx pause messages has been
- * requested. Disabled otherwise.
+ * Select only the auto_link_speed speed for
+ * autoneg mode. This mode has been DEPRECATED.
+ * An HWRM client should not use this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
+ /*
+ * Select the auto_link_speed or any speed below
+ * that speed for autoneg. This mode has been
+ * DEPRECATED. An HWRM client should not use
+ * this mode.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \
+ UINT32_C(0x3)
+ /*
+ * Select the speeds based on the corresponding
+ * link speed mask value that is provided.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
+ uint8_t auto_pause;
+ /*
+ * Current setting of pause autonegotiation. Move autoneg_pause
+ * flag here.
+ */
+ /*
+ * When this bit is '1', Generation of tx pause messages has
+ * been requested. Disabled otherwise.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1)
/*
@@ -3060,1136 +3165,1694 @@ struct hwrm_port_phy_qcfg_output {
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2)
/*
- * When set to 1, the advertisement of pause is enabled. # When the
- * auto_mode is not set to none and this flag is set to 1, then the
- * auto_pause bits on this port are being advertised and autoneg pause
- * results are being interpreted. # When the auto_mode is not set to
- * none and this flag is set to 0, the pause is forced as indicated in
- * force_pause, and also advertised as auto_pause bits, but the autoneg
- * results are not interpreted since the pause configuration is being
- * forced. # When the auto_mode is set to none and this flag is set to
- * 1, auto_pause bits should be ignored and should be set to 0.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
- UINT32_C(0x4)
- uint8_t auto_pause;
-
- /*
- * Current setting for auto_link_speed. This field is only valid when
- * auto_mode is set to "one_speed" or "one_or_below".
- */
- /* 100Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB \
- (UINT32_C(0x1) << 0)
- /* 1Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB \
- (UINT32_C(0xa) << 0)
- /* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB \
- (UINT32_C(0x14) << 0)
- /* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB \
- (UINT32_C(0x19) << 0)
- /* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB \
- (UINT32_C(0x64) << 0)
- /* 20Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB \
- (UINT32_C(0xc8) << 0)
- /* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB \
- (UINT32_C(0xfa) << 0)
- /* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB \
- (UINT32_C(0x190) << 0)
- /* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB \
- (UINT32_C(0x1f4) << 0)
- /* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB \
- (UINT32_C(0x3e8) << 0)
- /* 10Mb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \
- (UINT32_C(0xffff) << 0)
+ * When set to 1, the advertisement of pause is enabled. # When
+ * the auto_mode is not set to none and this flag is set to 1,
+ * then the auto_pause bits on this port are being advertised
+ * and autoneg pause results are being interpreted. # When the
+ * auto_mode is not set to none and this flag is set to 0, the
+ * pause is forced as indicated in force_pause, and also
+ * advertised as auto_pause bits, but the autoneg results are
+ * not interpreted since the pause configuration is being
+ * forced. # When the auto_mode is set to none and this flag is
+ * set to 1, auto_pause bits should be ignored and should be set
+ * to 0.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \
+ UINT32_C(0x4)
uint16_t auto_link_speed;
-
/*
- * Current setting for auto_link_speed_mask that is used to advertise
- * speeds during autonegotiation. This field is only valid when
- * auto_mode is set to "mask". The speeds specified in this field shall
- * be a subset of supported speeds on this port.
+ * Current setting for auto_link_speed. This field is only valid
+ * when auto_mode is set to "one_speed" or "one_or_below".
+ */
+ /* 100Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB UINT32_C(0x1)
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB UINT32_C(0xa)
+ /* 2Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB UINT32_C(0x14)
+ /* 2.5Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB UINT32_C(0x19)
+ /* 10Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB UINT32_C(0x64)
+ /* 20Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB UINT32_C(0xc8)
+ /* 25Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB UINT32_C(0xfa)
+ /* 40Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB UINT32_C(0x190)
+ /* 50Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB UINT32_C(0x1f4)
+ /* 100Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB UINT32_C(0x3e8)
+ /* 10Mb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB UINT32_C(0xffff)
+ uint16_t auto_link_speed_mask;
+ /*
+ * Current setting for auto_link_speed_mask that is used to
+ * advertise speeds during autonegotiation. This field is only
+ * valid when auto_mode is set to "mask". The speeds specified
+ * in this field shall be a subset of supported speeds on this
+ * port.
*/
/* 100Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \
+ UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
/* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \
+ UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \
+ UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
- UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \
+ UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
- UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \
+ UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
- UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \
+ UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
- UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \
+ UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
- UINT32_C(0x800)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \
+ UINT32_C(0x800)
/* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
- UINT32_C(0x1000)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \
+ UINT32_C(0x1000)
/* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
- UINT32_C(0x2000)
- uint16_t auto_link_speed_mask;
-
- /* Current setting for wirespeed. */
- /* Wirespeed feature is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0)
- /* Wirespeed feature is enabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON (UINT32_C(0x1) << 0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \
+ UINT32_C(0x2000)
uint8_t wirespeed;
-
- /* Current setting for loopback. */
- /* No loopback is selected. Normal operation. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE (UINT32_C(0x0) << 0)
- /*
- * The HW will be configured with local loopback such that host
- * data is sent back to the host without modification.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL (UINT32_C(0x1) << 0)
- /*
- * The HW will be configured with remote loopback such that port
- * logic will send packets back out the transmitter that are
- * received.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE (UINT32_C(0x2) << 0)
+ /* Current setting for wirespeed. */
+ /* Wirespeed feature is disabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF UINT32_C(0x0)
+ /* Wirespeed feature is enabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON UINT32_C(0x1)
uint8_t lpbk;
-
+ /* Current setting for loopback. */
+ /* No loopback is selected. Normal operation. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
- * Current setting of forced pause. When the pause configuration is not
- * being forced, then this value shall be set to 0.
+ * The HW will be configured with local loopback
+ * such that host data is sent back to the host
+ * without modification.
*/
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL UINT32_C(0x1)
/*
- * When this bit is '1', Generation of tx pause messages is supported.
- * Disabled otherwise.
+ * The HW will be configured with remote
+ * loopback such that port logic will send
+ * packets back out the transmitter that are
+ * received.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE UINT32_C(0x2)
+ uint8_t force_pause;
/*
- * When this bit is '1', Reception of rx pause messages is supported.
- * Disabled otherwise.
+ * Current setting of forced pause. When the pause configuration
+ * is not being forced, then this value shall be set to 0.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX \
- UINT32_C(0x2)
- uint8_t force_pause;
-
/*
- * This value indicates the current status of the optics module on this
- * port.
+ * When this bit is '1', Generation of tx pause messages is
+ * supported. Disabled otherwise.
*/
- /* Module is inserted and accepted */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \
- (UINT32_C(0x0) << 0)
- /* Module is rejected and transmit side Laser is disabled. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
- (UINT32_C(0x1) << 0)
- /* Module mismatch warning. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
- (UINT32_C(0x2) << 0)
- /* Module is rejected and powered down. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \
- (UINT32_C(0x3) << 0)
- /* Module is not inserted. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
- (UINT32_C(0x4) << 0)
- /* Module status is not applicable. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
- (UINT32_C(0xff) << 0)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX UINT32_C(0x1)
+ /*
+ * When this bit is '1', Reception of rx pause messages is
+ * supported. Disabled otherwise.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX UINT32_C(0x2)
uint8_t module_status;
-
- /* Current setting for preemphasis. */
+ /*
+ * This value indicates the current status of the optics module
+ * on this port.
+ */
+ /* Module is inserted and accepted */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE UINT32_C(0x0)
+ /* Module is rejected and transmit side Laser is disabled. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \
+ UINT32_C(0x1)
+ /* Module mismatch warning. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \
+ UINT32_C(0x2)
+ /* Module is rejected and powered down. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN UINT32_C(0x3)
+ /* Module is not inserted. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \
+ UINT32_C(0x4)
+ /* Module status is not applicable. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \
+ UINT32_C(0xff)
uint32_t preemphasis;
-
- /* This field represents the major version of the PHY. */
+ /* Current setting for preemphasis. */
uint8_t phy_maj;
-
- /* This field represents the minor version of the PHY. */
+ /* This field represents the major version of the PHY. */
uint8_t phy_min;
-
- /* This field represents the build version of the PHY. */
+ /* This field represents the minor version of the PHY. */
uint8_t phy_bld;
-
- /* This value represents a PHY type. */
- /* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \
- (UINT32_C(0x0) << 0)
- /* BASE-CR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \
- (UINT32_C(0x1) << 0)
- /* BASE-KR4 (Deprecated) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \
- (UINT32_C(0x2) << 0)
- /* BASE-LR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \
- (UINT32_C(0x3) << 0)
- /* BASE-SR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \
- (UINT32_C(0x4) << 0)
- /* BASE-KR2 (Deprecated) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \
- (UINT32_C(0x5) << 0)
- /* BASE-KX */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \
- (UINT32_C(0x6) << 0)
- /* BASE-KR */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \
- (UINT32_C(0x7) << 0)
- /* BASE-T */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \
- (UINT32_C(0x8) << 0)
- /* EEE capable BASE-T */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \
- (UINT32_C(0x9) << 0)
- /* SGMII connected external PHY */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \
- (UINT32_C(0xa) << 0)
+ /* This field represents the build version of the PHY. */
uint8_t phy_type;
-
- /* This value represents a media type. */
- /* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN \
- (UINT32_C(0x0) << 0)
- /* Twisted Pair */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP (UINT32_C(0x1) << 0)
- /* Direct Attached Copper */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC \
- (UINT32_C(0x2) << 0)
- /* Fiber */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE \
- (UINT32_C(0x3) << 0)
+ /* This value represents a PHY type. */
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN UINT32_C(0x0)
+ /* BASE-CR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR UINT32_C(0x1)
+ /* BASE-KR4 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 UINT32_C(0x2)
+ /* BASE-LR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR UINT32_C(0x3)
+ /* BASE-SR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR UINT32_C(0x4)
+ /* BASE-KR2 (Deprecated) */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 UINT32_C(0x5)
+ /* BASE-KX */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX UINT32_C(0x6)
+ /* BASE-KR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR UINT32_C(0x7)
+ /* BASE-T */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET UINT32_C(0x8)
+ /* EEE capable BASE-T */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE UINT32_C(0x9)
+ /* SGMII connected external PHY */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY UINT32_C(0xa)
uint8_t media_type;
-
- /* This value represents a transceiver type. */
- /* PHY and MAC are in the same package */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
- (UINT32_C(0x1) << 0)
- /* PHY and MAC are in different packages */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
- (UINT32_C(0x2) << 0)
+ /* This value represents a media type. */
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN UINT32_C(0x0)
+ /* Twisted Pair */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP UINT32_C(0x1)
+ /* Direct Attached Copper */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC UINT32_C(0x2)
+ /* Fiber */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE UINT32_C(0x3)
uint8_t xcvr_pkg_type;
-
+ /* This value represents a transceiver type. */
+ /* PHY and MAC are in the same package */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \
+ UINT32_C(0x1)
+ /* PHY and MAC are in different packages */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \
+ UINT32_C(0x2)
+ uint8_t eee_config_phy_addr;
/*
- * This field represents flags related to EEE configuration. These EEE
- * configuration flags are valid only when the auto_mode is not set to
- * none (in other words autonegotiation is enabled).
+ * This field represents flags related to EEE configuration.
+ * These EEE configuration flags are valid only when the
+ * auto_mode is not set to none (in other words autonegotiation
+ * is enabled).
*/
/* This field represents PHY address. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f)
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
- * Speeds for autoneg with EEE mode enabled are based on
- * eee_link_speed_mask.
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is
+ * enabled. Speeds for autoneg with EEE mode enabled are based
+ * on eee_link_speed_mask.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \
+ UINT32_C(0x20)
/*
* This flag is valid only when eee_enabled is set to 1. # If
- * eee_enabled is set to 0, then EEE mode is disabled and this flag
- * shall be ignored. # If eee_enabled is set to 1 and this flag is set
- * to 1, then Energy Efficient Ethernet (EEE) mode is enabled and in
- * use. # If eee_enabled is set to 1 and this flag is set to 0, then
- * Energy Efficient Ethernet (EEE) mode is enabled but is currently not
- * in use.
+ * eee_enabled is set to 0, then EEE mode is disabled and this
+ * flag shall be ignored. # If eee_enabled is set to 1 and this
+ * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
+ * is enabled and in use. # If eee_enabled is set to 1 and this
+ * flag is set to 0, then Energy Efficient Ethernet (EEE) mode
+ * is enabled but is currently not in use.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \
- UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE UINT32_C(0x40)
/*
* This flag is valid only when eee_enabled is set to 1. # If
- * eee_enabled is set to 0, then EEE mode is disabled and this flag
- * shall be ignored. # If eee_enabled is set to 1 and this flag is set
- * to 1, then Energy Efficient Ethernet (EEE) mode is enabled and TX LPI
- * is enabled. # If eee_enabled is set to 1 and this flag is set to 0,
- * then Energy Efficient Ethernet (EEE) mode is enabled but TX LPI is
- * disabled.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \
- UINT32_C(0x80)
- /*
- * This field represents flags related to EEE configuration. These EEE
- * configuration flags are valid only when the auto_mode is not set to
- * none (in other words autonegotiation is enabled).
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \
- UINT32_C(0xe0)
+ * eee_enabled is set to 0, then EEE mode is disabled and this
+ * flag shall be ignored. # If eee_enabled is set to 1 and this
+ * flag is set to 1, then Energy Efficient Ethernet (EEE) mode
+ * is enabled and TX LPI is enabled. # If eee_enabled is set to
+ * 1 and this flag is set to 0, then Energy Efficient Ethernet
+ * (EEE) mode is enabled but TX LPI is disabled.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI UINT32_C(0x80)
+ /*
+ * This field represents flags related to EEE configuration.
+ * These EEE configuration flags are valid only when the
+ * auto_mode is not set to none (in other words autonegotiation
+ * is enabled).
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0)
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
- uint8_t eee_config_phy_addr;
-
+ uint8_t parallel_detect;
/* Reserved field, set to 0 */
/*
- * When set to 1, the parallel detection is used to determine the speed
- * of the link partner. Parallel detection is used when a
- * autonegotiation capable device is connected to a link parter that is
- * not capable of autonegotiation.
+ * When set to 1, the parallel detection is used to determine
+ * the speed of the link partner. Parallel detection is used
+ * when an autonegotiation capable device is connected to a link
+ * partner that is not capable of autonegotiation.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT UINT32_C(0x1)
/* Reserved field, set to 0 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe)
#define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1
- uint8_t parallel_detect;
-
+ uint16_t link_partner_adv_speeds;
/*
* The advertised speeds for the port by the link partner. Each
* advertised speed will be set to '1'.
*/
/* 100Mb link speed (Half-duplex) */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \
- UINT32_C(0x1)
+ UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \
+ UINT32_C(0x2)
/* 1Gb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \
+ UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
- UINT32_C(0x8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \
+ UINT32_C(0x8)
/* 2Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \
+ UINT32_C(0x10)
/* 2.5Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
- UINT32_C(0x40)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \
+ UINT32_C(0x40)
/* 20Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
- UINT32_C(0x80)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \
+ UINT32_C(0x80)
/* 25Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
- UINT32_C(0x100)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \
+ UINT32_C(0x100)
/* 40Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
- UINT32_C(0x200)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \
+ UINT32_C(0x200)
/* 50Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
- UINT32_C(0x400)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \
+ UINT32_C(0x400)
/* 100Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
- UINT32_C(0x800)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \
+ UINT32_C(0x800)
/* 10Mb link speed (Half-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
- UINT32_C(0x1000)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \
+ UINT32_C(0x1000)
/* 10Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
- UINT32_C(0x2000)
- uint16_t link_partner_adv_speeds;
-
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \
+ UINT32_C(0x2000)
+ uint8_t link_partner_adv_auto_mode;
/*
- * The advertised autoneg for the port by the link partner. This field
- * is deprecated and should be set to 0.
+ * The advertised autoneg for the port by the link partner. This
+ * field is deprecated and should be set to 0.
+ */
+ /*
+ * Disable autoneg or autoneg disabled. No
+ * speeds are selected.
*/
- /*
- * Disable autoneg or autoneg disabled. No speeds are selected.
- */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \
- (UINT32_C(0x0) << 0)
- /* Select all possible speeds for autoneg mode. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS\
- (UINT32_C(0x1) << 0)
- /*
- * Select only the auto_link_speed speed for autoneg mode. This
- * mode has been DEPRECATED. An HWRM client should not use this
- * mode.
- */
+ UINT32_C(0x0)
+ /* Select all possible speeds for autoneg mode. */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS \
+ UINT32_C(0x1)
+ /*
+ * Select only the auto_link_speed speed for
+ * autoneg mode. This mode has been DEPRECATED.
+ * An HWRM client should not use this mode.
+ */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \
- (UINT32_C(0x2) << 0)
- /*
- * Select the auto_link_speed or any speed below that speed for
- * autoneg. This mode has been DEPRECATED. An HWRM client should
- * not use this mode.
- */
+ UINT32_C(0x2)
+ /*
+ * Select the auto_link_speed or any speed below
+ * that speed for autoneg. This mode has been
+ * DEPRECATED. An HWRM client should not use
+ * this mode.
+ */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \
- (UINT32_C(0x3) << 0)
- /*
- * Select the speeds based on the corresponding link speed mask
- * value that is provided.
- */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK\
- (UINT32_C(0x4) << 0)
- uint8_t link_partner_adv_auto_mode;
-
+ UINT32_C(0x3)
+ /*
+ * Select the speeds based on the corresponding
+ * link speed mask value that is provided.
+ */
+ #define \
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK \
+ UINT32_C(0x4)
+ uint8_t link_partner_adv_pause;
/* The advertised pause settings on the port by the link partner. */
/*
- * When this bit is '1', Generation of tx pause messages is supported.
- * Disabled otherwise.
+ * When this bit is '1', Generation of tx pause messages is
+ * supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \
+ UINT32_C(0x1)
/*
- * When this bit is '1', Reception of rx pause messages is supported.
- * Disabled otherwise.
+ * When this bit is '1', Reception of rx pause messages is
+ * supported. Disabled otherwise.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
- UINT32_C(0x2)
- uint8_t link_partner_adv_pause;
-
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \
+ UINT32_C(0x2)
+ uint16_t adv_eee_link_speed_mask;
/*
- * Current setting for link speed mask that is used to advertise speeds
- * during autonegotiation when EEE is enabled. This field is valid only
- * when eee_enabled flags is set to 1. The speeds specified in this
- * field shall be a subset of speeds specified in auto_link_speed_mask.
+ * Current setting for link speed mask that is used to advertise
+ * speeds during autonegotiation when EEE is enabled. This field
+ * is valid only when eee_enabled flags is set to 1. The speeds
+ * specified in this field shall be a subset of speeds specified
+ * in auto_link_speed_mask.
*/
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
- UINT32_C(0x1)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
+ UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
- UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \
+ UINT32_C(0x2)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
- UINT32_C(0x4)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
+ UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \
+ UINT32_C(0x8)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
- UINT32_C(0x10)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
+ UINT32_C(0x10)
/* Reserved */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
- UINT32_C(0x20)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
+ UINT32_C(0x20)
/* 10Gb link speed */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
- uint16_t adv_eee_link_speed_mask;
-
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \
+ UINT32_C(0x40)
+ uint16_t link_partner_adv_eee_link_speed_mask;
/*
- * Current setting for link speed mask that is advertised by the link
- * partner when EEE is enabled. This field is valid only when
- * eee_enabled flags is set to 1.
+ * Current setting for link speed mask that is advertised by the
+ * link partner when EEE is enabled. This field is valid only
+ * when eee_enabled flags is set to 1.
*/
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \
- UINT32_C(0x1)
+ UINT32_C(0x1)
/* 100Mb link speed (Full-duplex) */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \
- UINT32_C(0x2)
+ UINT32_C(0x2)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \
- UINT32_C(0x4)
+ UINT32_C(0x4)
/* 1Gb link speed (Full-duplex) */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \
- UINT32_C(0x8)
+ UINT32_C(0x8)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \
- UINT32_C(0x10)
+ UINT32_C(0x10)
/* Reserved */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \
- UINT32_C(0x20)
+ UINT32_C(0x20)
/* 10Gb link speed */
#define \
HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \
- UINT32_C(0x40)
- uint16_t link_partner_adv_eee_link_speed_mask;
-
+ UINT32_C(0x40)
+ uint32_t xcvr_identifier_type_tx_lpi_timer;
/* This value represents transceiver identifier type. */
/*
- * Current setting of TX LPI timer in microseconds. This field is valid
- * only when_eee_enabled flag is set to 1 and tx_lpi_enabled is set to
- * 1.
+ * Current setting of TX LPI timer in microseconds. This field
+ * is valid only when eee_enabled flag is set to 1 and
+ * tx_lpi_enabled is set to 1.
*/
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
- UINT32_C(0xffffff)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \
+ UINT32_C(0xffffff)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0
/* This value represents transceiver identifier type. */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
- UINT32_C(0xff000000)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT \
- 24
- /* Unknown */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
- (UINT32_C(0x0) << 24)
- /* SFP/SFP+/SFP28 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
- (UINT32_C(0x3) << 24)
- /* QSFP */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
- (UINT32_C(0xc) << 24)
- /* QSFP+ */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
- (UINT32_C(0xd) << 24)
- /* QSFP28 */
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
- (UINT32_C(0x11) << 24)
- uint32_t xcvr_identifier_type_tx_lpi_timer;
-
- uint32_t unused_1;
-
- /*
- * Up to 16 bytes of null padded ASCII string representing PHY vendor.
- * If the string is set to null, then the vendor name is not available.
- */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \
+ UINT32_C(0xff000000)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT 24
+ /* Unknown */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \
+ (UINT32_C(0x0) << 24)
+ /* SFP/SFP+/SFP28 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \
+ (UINT32_C(0x3) << 24)
+ /* QSFP */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \
+ (UINT32_C(0xc) << 24)
+ /* QSFP+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \
+ (UINT32_C(0xd) << 24)
+ /* QSFP28 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \
+ (UINT32_C(0x11) << 24)
+ uint16_t fec_cfg;
+ /*
+ * This value represents the current configuration of Forward
+ * Error Correction (FEC) on the port.
+ */
+ /*
+ * When set to 1, then FEC is not supported on this port. If
+ * this flag is set to 1, then all other FEC configuration flags
+ * shall be ignored. When set to 0, then FEC is supported as
+ * indicated by other configuration flags. If no cable is
+ * attached and the HWRM does not yet know the FEC capability,
+ * then the HWRM shall set this flag to 1 when reporting FEC
+ * capability.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED \
+ UINT32_C(0x1)
+ /*
+ * When set to 1, then FEC autonegotiation is supported on this
+ * port. When set to 0, then FEC autonegotiation is not
+ * supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED \
+ UINT32_C(0x2)
+ /*
+ * When set to 1, then FEC autonegotiation is enabled on this
+ * port. When set to 0, then FEC autonegotiation is disabled if
+ * supported. This flag should be ignored if FEC autonegotiation
+ * is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED \
+ UINT32_C(0x4)
+ /*
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on
+ * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
+ * not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED \
+ UINT32_C(0x8)
+ /*
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on
+ * this port. When set to 0, then FEC CLAUSE 74 (Fire Code) is
+ * disabled if supported. This flag should be ignored if FEC
+ * CLAUSE 74 is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED \
+ UINT32_C(0x10)
+ /*
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is supported
+ * on this port. When set to 0, then FEC CLAUSE 91 (Reed
+ * Solomon) is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_SUPPORTED \
+ UINT32_C(0x20)
+ /*
+ * When set to 1, then FEC CLAUSE 91 (Reed Solomon) is enabled
+ * on this port. When set to 0, then FEC CLAUSE 91 (Reed
+ * Solomon) is disabled if supported. This flag should be
+ * ignored if FEC CLAUSE 91 is not supported on this port.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \
+ UINT32_C(0x40)
+ uint8_t unused_1;
+ uint8_t unused_2;
char phy_vendor_name[16];
-
/*
- * Up to 16 bytes of null padded ASCII string that identifies vendor
- * specific part number of the PHY. If the string is set to null, then
- * the vendor specific part number is not available.
+ * Up to 16 bytes of null padded ASCII string representing PHY
+ * vendor. If the string is set to null, then the vendor name is
+ * not available.
*/
char phy_vendor_partnumber[16];
-
- uint32_t unused_2;
- uint8_t unused_3;
+ /*
+ * Up to 16 bytes of null padded ASCII string that identifies
+ * vendor specific part number of the PHY. If the string is set
+ * to null, then the vendor specific part number is not
+ * available.
+ */
+ uint32_t unused_3;
uint8_t unused_4;
uint8_t unused_5;
-
+ uint8_t unused_6;
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
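The forced-speed encoding above (units of 100 Mbps, with 0xffff as the lone exception for 10Mb and 0 meaning "not forced") is easiest to see in a small decode helper. The following is a minimal, illustrative sketch only, not part of the generated header: the helper name is hypothetical, it uses only fields and macros defined in this structure, and it assumes the response has already been read out of the HWRM reply buffer.

/* Illustrative sketch only -- not part of the generated header. */
#include <rte_byteorder.h>

/*
 * Decode force_link_speed into Mbps. HWRM fields are little-endian;
 * 0 means no speed is forced, 0xffff is the special 10Mb encoding,
 * all other values are in units of 100 Mbps.
 */
static inline uint32_t
example_forced_speed_mbps(const struct hwrm_port_phy_qcfg_output *resp)
{
	uint16_t raw = rte_le_to_cpu_16(resp->force_link_speed);

	if (raw == 0)
		return 0;	/* link speed is not being forced */
	if (raw == HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB)
		return 10;
	return (uint32_t)raw * 100;
}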
-/* hwrm_ver_get */
+/* hwrm_queue_qportcfg */
/*
- * Description: This function is called by a driver to determine the HWRM
- * interface version supported by the HWRM firmware, the version of HWRM
- * firmware implementation, the name of HWRM firmware, the versions of other
- * embedded firmwares, and the names of other embedded firmwares, etc. Any
- * interface or firmware version with major = 0, minor = 0, and update = 0 shall
- * be considered an invalid version.
+ * Description: This function is called by a driver to query queue configuration
+ * of a port. # The HWRM shall at least advertise one queue with lossy service
+ * profile. # The driver shall use this command to query queue ids before
+ * configuring or using any queues. # If a service profile is not set for a
+ * queue, then the driver shall not use that queue without configuring a service
+ * profile for it. # If the driver is not allowed to configure service profiles,
+ * then the driver shall only use queues for which service profiles are pre-
+ * configured.
*/
-
/* Input (24 bytes) */
-struct hwrm_ver_get_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+struct hwrm_queue_qportcfg_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
/*
- * This field represents the major version of HWRM interface
- * specification supported by the driver HWRM implementation. The
- * interface major version is intended to change only when non backward
- * compatible changes are made to the HWRM interface specification.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- uint8_t hwrm_intf_maj;
-
+ uint32_t flags;
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the driver HWRM implementation. A change
- * in interface minor version is used to reflect significant backward
- * compatible modification to HWRM interface specification. This can be
- * due to addition or removal of functionality. HWRM interface
- * specifications with the same major version but different minor
- * versions are compatible.
- */
- uint8_t hwrm_intf_min;
-
+ * Enumeration denoting the RX, TX type of the resource. This
+ * enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
+ */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
+ /* rx path */
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
+ #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
+ uint16_t port_id;
/*
- * This field represents the update version of HWRM interface
- * specification supported by the driver HWRM implementation. The
- * interface update version is used to reflect minor changes or bug
- * fixes to a released HWRM interface specification.
+ * Port ID of port for which the queue configuration is being
+ * queried. This field is only required when sent by IPC.
*/
- uint8_t hwrm_intf_upd;
-
- uint8_t unused_0[5];
+ uint16_t unused_0;
} __attribute__((packed));
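The description above requires drivers to query queue ids before configuring or using any queue; the request side of that query is small enough to sketch. The following is illustrative only, not part of the generated header: the helper name is hypothetical, and the caller is assumed to fill req_type, seq_id, cmpl_ring and resp_addr when the message is actually posted to the HWRM.

/* Illustrative sketch only -- not part of the generated header. */
#include <string.h>
#include <rte_byteorder.h>

/* Prepare a TX-path queue configuration query for the given port. */
static inline void
example_qportcfg_prep(struct hwrm_queue_qportcfg_input *req, uint16_t port_id)
{
	memset(req, 0, sizeof(*req));
	req->flags = rte_cpu_to_le_32(HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
	req->port_id = rte_cpu_to_le_16(port_id);
}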
-/* Output (128 bytes) */
-struct hwrm_ver_get_output {
+/* Output (32 bytes) */
+struct hwrm_queue_qportcfg_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
+ uint8_t max_configurable_queues;
/*
- * This field represents the major version of HWRM interface
- * specification supported by the HWRM implementation. The interface
- * major version is intended to change only when non backward compatible
- * changes are made to the HWRM interface specification. A HWRM
- * implementation that is compliant with this specification shall
- * provide value of 1 in this field.
+ * The maximum number of queues that can be configured on this
+ * port. Valid values range from 1 through 8.
*/
- uint8_t hwrm_intf_maj;
-
+ uint8_t max_configurable_lossless_queues;
/*
- * This field represents the minor version of HWRM interface
- * specification supported by the HWRM implementation. A change in
- * interface minor version is used to reflect significant backward
- * compatible modification to HWRM interface specification. This can be
- * due to addition or removal of functionality. HWRM interface
- * specifications with the same major version but different minor
- * versions are compatible. A HWRM implementation that is compliant with
- * this specification shall provide value of 0 in this field.
+ * The maximum number of lossless queues that can be configured
+ * on this port. Valid values range from 0 through 8.
*/
- uint8_t hwrm_intf_min;
-
+ uint8_t queue_cfg_allowed;
/*
- * This field represents the update version of HWRM interface
- * specification supported by the HWRM implementation. The interface
- * update version is used to reflect minor changes or bug fixes to a
- * released HWRM interface specification. A HWRM implementation that is
- * compliant with this specification shall provide value of 1 in this
- * field.
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cfg command. Each bit represents a specific queue
+ * where bit 0 represents queue 0 and bit 7 represents queue 7.
+ * # A value of 0 indicates that the queue is not configurable
+ * by the hwrm_queue_cfg command. # A value of 1 indicates that
+ * the queue is configurable. # A hwrm_queue_cfg command shall
+ * return error when trying to configure a queue not
+ * configurable.
+ */
+ uint8_t queue_cfg_info;
+ /* Information about queue configuration. */
+ /*
+ * If this flag is set to '1', then the queues are configured
+ * asymmetrically on TX and RX sides. If this flag is set to
+ * '0', then the queues are configured symmetrically on TX and
+ * RX sides. For symmetric configuration, the queue
+ * configuration including queue ids and service profiles on the
+ * TX side is the same as the corresponding queue configuration
+ * on the RX side.
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG \
+ UINT32_C(0x1)
+ uint8_t queue_pfcenable_cfg_allowed;
+ /*
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pfcenable_cfg command. Each bit represents a
+ * specific queue where bit 0 represents queue 0 and bit 7
+ * represents queue 7. # A value of 0 indicates that the queue
+ * is not configurable by the hwrm_queue_pfcenable_cfg command.
+ * # A value of 1 indicates that the queue is configurable. # A
+ * hwrm_queue_pfcenable_cfg command shall return error when
+ * trying to configure a queue that is not configurable.
*/
- uint8_t hwrm_intf_upd;
-
- uint8_t hwrm_intf_rsvd;
-
+ uint8_t queue_pri2cos_cfg_allowed;
/*
- * This field represents the major version of HWRM firmware. A change in
- * firmware major version represents a major firmware release.
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_pri2cos_cfg command. Each bit represents a
+ * specific queue where bit 0 represents queue 0 and bit 7
+ * represents queue 7. # A value of 0 indicates that the queue
+ * is not configurable by the hwrm_queue_pri2cos_cfg command. #
+ * A value of 1 indicates that the queue is configurable. # A
+ * hwrm_queue_pri2cos_cfg command shall return error when trying
+ * to configure a queue that is not configurable.
*/
- uint8_t hwrm_fw_maj;
-
+ uint8_t queue_cos2bw_cfg_allowed;
/*
- * This field represents the minor version of HWRM firmware. A change in
- * firmware minor version represents significant firmware functionality
- * changes.
+ * Bitmask indicating which queues can be configured by the
+ * hwrm_queue_cos2bw_cfg command. Each bit represents a
+ * specific queue where bit 0 represents queue 0 and bit 7
+ * represents queue 7. # A value of 0 indicates that the queue
+ * is not configurable by the hwrm_queue_cos2bw_cfg command. #
+ * A value of 1 indicates that the queue is configurable. # A
+ * hwrm_queue_cos2bw_cfg command shall return error when trying
+ * to configure a queue that is not configurable.
*/
- uint8_t hwrm_fw_min;
-
+ uint8_t queue_id0;
/*
- * This field represents the build version of HWRM firmware. A change in
- * firmware build version represents bug fixes to a released firmware.
+ * ID of CoS Queue 0. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t hwrm_fw_bld;
-
+ uint8_t queue_id0_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field is a reserved field. This field can be used to represent
- * firmware branches or customer specific releases tied to a specific
- * (major,minor,update) version of the HWRM firmware.
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t hwrm_fw_rsvd;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id1;
/*
- * This field represents the major version of mgmt firmware. A change in
- * major version represents a major release.
+ * ID of CoS Queue 1. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t mgmt_fw_maj;
-
+ uint8_t queue_id1_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field represents the minor version of mgmt firmware. A change in
- * minor version represents significant functionality changes.
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t mgmt_fw_min;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id2;
/*
- * This field represents the build version of mgmt firmware. A change in
- * update version represents bug fixes.
+ * ID of CoS Queue 2. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t mgmt_fw_bld;
-
+ uint8_t queue_id2_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field is a reserved field. This field can be used to represent
- * firmware branches or customer specific releases tied to a specific
- * (major,minor,update) version
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t mgmt_fw_rsvd;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id3;
/*
- * This field represents the major version of network control firmware.
- * A change in major version represents a major release.
+ * ID of CoS Queue 3. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t netctrl_fw_maj;
-
+ uint8_t queue_id3_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field represents the minor version of network control firmware.
- * A change in minor version represents significant functionality
- * changes.
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t netctrl_fw_min;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id4;
/*
- * This field represents the build version of network control firmware.
- * A change in update version represents bug fixes.
+ * ID of CoS Queue 4. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t netctrl_fw_bld;
-
+ uint8_t queue_id4_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field is a reserved field. This field can be used to represent
- * firmware branches or customer specific releases tied to a specific
- * (major,minor,update) version
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t netctrl_fw_rsvd;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id5;
/*
- * This field is reserved for future use. The responder should set it to
- * 0. The requester should ignore this field.
+ * ID of CoS Queue 5. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint32_t reserved1;
-
+ uint8_t queue_id5_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field represents the major version of RoCE firmware. A change in
- * major version represents a major release.
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t roce_fw_maj;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id6;
/*
- * This field represents the minor version of RoCE firmware. A change in
- * minor version represents significant functionality changes.
+ * ID of CoS Queue 6. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t roce_fw_min;
-
+ uint8_t queue_id6_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field represents the build version of RoCE firmware. A change in
- * update version represents bug fixes.
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- uint8_t roce_fw_bld;
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t queue_id7;
/*
- * This field is a reserved field. This field can be used to represent
- * firmware branches or customer specific releases tied to a specific
- * (major,minor,update) version
+ * ID of CoS Queue 7. FF - Invalid id # This ID can be used on
+ * any subsequent call to an hwrm command that takes a queue id.
+ * # IDs must always be queried by this command before any use
+ * by the driver or software. # Any driver or software should
+ * not make any assumptions about queue IDs. # A value of 0xff
+ * indicates that the queue is not available. # Available queues
+ * may not be in sequential order.
*/
- uint8_t roce_fw_rsvd;
-
+ uint8_t queue_id7_service_profile;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
+ UINT32_C(0x0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
+ UINT32_C(0x1)
/*
- * This field represents the name of HWRM FW (ASCII chars without NULL
- * at the end).
+ * Set to 0xFF... (All Fs) if there is no
+ * service profile specified
*/
- char hwrm_fw_name[16];
-
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
+ UINT32_C(0xff)
+ uint8_t valid;
/*
- * This field represents the name of mgmt FW (ASCII chars without NULL
- * at the end).
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- char mgmt_fw_name[16];
+} __attribute__((packed));
+/* hwrm_vnic_alloc */
+/*
+ * Description: This VNIC is a resource in the RX side of the chip that is used
+ * to represent a virtual host "interface". # At the time of VNIC allocation or
+ * configuration, the function can specify whether it wants the requested VNIC
+ * to be the default VNIC for the function or not. # If a function requests
+ * allocation of a VNIC for the first time and a VNIC is successfully allocated
+ * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC
+ * for that function. # The default VNIC shall be used for the default action
+ * for a partition or function. # For each VNIC allocated on a function, a
+ * mapping on the RX side to map the allocated VNIC to source virtual interface
+ * shall be performed by the HWRM. This should be hidden to the function driver
+ * requesting the VNIC allocation. This enables broadcast/multicast replication
+ * with source knockout. # If multicast replication with source knockout is
+ * enabled, then the internal VNIC to SVIF mapping data structures shall be
+ * programmed at the time of VNIC allocation.
+ */
+/* Input (24 bytes) */
+struct hwrm_vnic_alloc_input {
+ uint16_t req_type;
/*
- * This field represents the name of network control firmware (ASCII
- * chars without NULL at the end).
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
- char netctrl_fw_name[16];
-
+ uint16_t cmpl_ring;
/*
- * This field is reserved for future use. The responder should set it to
- * 0. The requester should ignore this field.
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
- uint32_t reserved2[4];
-
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
/*
- * This field represents the name of RoCE FW (ASCII chars without NULL
- * at the end).
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
- char roce_fw_name[16];
-
- /* This field returns the chip number. */
- uint16_t chip_num;
-
- /* This field returns the revision of chip. */
- uint8_t chip_rev;
-
- /* This field returns the chip metal number. */
- uint8_t chip_metal;
-
- /* This field returns the bond id of the chip. */
- uint8_t chip_bond_id;
-
+ uint64_t resp_addr;
/*
- * This value indicates the type of platform used for chip
- * implementation.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- /* ASIC */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC \
- (UINT32_C(0x0) << 0)
- /* FPGA platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA \
- (UINT32_C(0x1) << 0)
- /* Palladium platform of the chip. */
- #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM \
- (UINT32_C(0x2) << 0)
- uint8_t chip_platform_type;
-
+ uint32_t flags;
/*
- * This field returns the maximum value of request window that is
- * supported by the HWRM. The request window is mapped into device
- * address space using MMIO.
+ * When this bit is '1', this VNIC is requested to be the
+ * default VNIC for this function.
*/
- uint16_t max_req_win_len;
+ #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ uint32_t unused_0;
+} __attribute__((packed));
+/* Output (16 bytes) */
+struct hwrm_vnic_alloc_output {
+ uint16_t error_code;
/*
- * This field returns the maximum value of response buffer in bytes. If
- * a request specifies the response buffer length that is greater than
- * this value, then the HWRM should fail it. The value of this field
- * shall be 4KB or more.
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t max_resp_len;
-
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field returns the default request timeout value in milliseconds.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t def_req_timeout;
-
+ uint32_t vnic_id;
+ /* Logical vnic ID */
uint8_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
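/*
 * Illustrative sketch (editor's example, not part of the patch): how a
 * driver might issue hwrm_vnic_alloc using the structures above and read
 * back the logical vnic_id.  HWRM_VNIC_ALLOC is assumed to be the command
 * identifier defined elsewhere in this header; example_hwrm_send() is a
 * hypothetical stand-in for the driver's real HWRM transport, which is
 * also assumed to fill in seq_id and resp_addr.
 */
#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

int example_hwrm_send(void *req, size_t req_len, void *resp, size_t resp_len);

static int example_vnic_alloc(uint32_t *vnic_id)
{
	struct hwrm_vnic_alloc_input req;
	struct hwrm_vnic_alloc_output resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.req_type = rte_cpu_to_le_16(HWRM_VNIC_ALLOC);	/* assumed identifier */
	req.cmpl_ring = rte_cpu_to_le_16(UINT16_MAX);		/* -1: no CR completion */
	/* Ask for this VNIC to become the function's default VNIC. */
	req.flags = rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);

	if (example_hwrm_send(&req, sizeof(req), &resp, sizeof(resp)) != 0)
		return -1;
	*vnic_id = rte_le_to_cpu_32(resp.vnic_id);
	return 0;
}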
-/* hwrm_queue_qportcfg */
+/* hwrm_vnic_free */
/*
- * Description: This function is called by a driver to query queue configuration
- * of a port. # The HWRM shall at least advertise one queue with lossy service
- * profile. # The driver shall use this command to query queue ids before
- * configuring or using any queues. # If a service profile is not set for a
- * queue, then the driver shall not use that queue without configuring a service
- * profile for it. # If the driver is not allowed to configure service profiles,
- * then the driver shall only use queues for which service profiles are pre-
- * configured.
+ * Description: Free a VNIC resource. Idle any resources associated with the
+ * VNIC as well as the VNIC. Reset and release all resources associated with the
+ * VNIC.
*/
-
/* Input (24 bytes) */
-struct hwrm_queue_qportcfg_input {
+struct hwrm_vnic_free_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t vnic_id;
+ /* Logical vnic ID */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_free_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+/* hwrm_vnic_cfg */
+/* Description: Configure the RX VNIC structure. */
+/* Input (40 bytes) */
+struct hwrm_vnic_cfg_input {
+ uint16_t req_type;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
/* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC is requested to be the default
+ * VNIC for the function.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC is being configured to strip
+ * VLAN in the RX path. If set to '0', then VLAN stripping is
+ * disabled on this VNIC.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC is being configured to buffer
+ * receive packets in the hardware until the host posts new
+ * receive buffers. If set to '0', then bd_stall is being
+ * configured to be disabled on this VNIC.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC is being configured to receive
+ * both RoCE and non-RoCE traffic. If set to '0', then this VNIC
+ * is not configured to be operating in dual VNIC mode.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
+ /*
+ * When this flag is set to '1', the VNIC is requested to be
+ * configured to receive only RoCE traffic. If this flag is set
+ * to '0', then this flag shall be ignored by the HWRM. If
+ * roce_dual_vnic_mode flag is set to '1', then the HWRM client
+ * shall not set this flag to '1'.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
+ /*
+ * When a VNIC uses one destination ring group for certain
+ * application (e.g. Receive Flow Steering) where exact match is
+ * used to direct packets to a VNIC with one destination ring
+ * group only, there is no need to configure RSS indirection
+ * table for that VNIC as only one destination ring group is
+ * used. This flag is used to enable a mode where RSS is enabled
+ * in the VNIC using a RSS context for computing RSS hash but
+ * the RSS indirection table is not configured using
+ * hwrm_vnic_rss_cfg. If this mode is enabled, then the driver
+ * should not program RSS indirection table for the RSS context
+ * that is used for computing RSS hash only.
+ */
+ #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20)
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the dflt_ring_grp field to be
+ * configured.
+ */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1)
+ /* This bit must be '1' for the rss_rule field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2)
+ /* This bit must be '1' for the cos_rule field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4)
+ /* This bit must be '1' for the lb_rule field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8)
+ /* This bit must be '1' for the mru field to be configured. */
+ #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10)
+ uint16_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t dflt_ring_grp;
+ /*
+ * Default Completion ring for the VNIC. This ring will be
+ * chosen if packet does not match any RSS rules and if there is
+ * no COS rule.
+ */
+ uint16_t rss_rule;
+ /*
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * there is no RSS rule.
+ */
+ uint16_t cos_rule;
+ /*
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * there is no COS rule.
+ */
+ uint16_t lb_rule;
+ /*
+ * RSS ID for load balancing rule/table structure. 0xFF... (All
+ * Fs) if there is no LB rule.
+ */
+ uint16_t mru;
+ /*
+ * The maximum receive unit of the vnic. Each vnic is associated
+ * with a function. The vnic mru value overwrites the mru
+ * setting of the associated function. The HWRM shall make sure
+ * that vnic mru does not exceed the mru of the port the
+ * function is associated with.
+ */
+ uint32_t unused_0;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
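/*
 * Illustrative sketch (editor's example, not part of the patch): populating
 * hwrm_vnic_cfg_input.  Only fields whose bits are set in 'enables' are
 * consumed by the HWRM, so the enables mask and the fields must be kept in
 * sync.  The ids are assumed to come from earlier hwrm_vnic_alloc and
 * hwrm_ring_grp_alloc calls; the send step is omitted.
 */
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void example_vnic_cfg_fill(struct hwrm_vnic_cfg_input *req,
				  uint16_t vnic_id, uint16_t dflt_ring_grp,
				  uint16_t mru)
{
	memset(req, 0, sizeof(*req));
	/* Strip VLAN tags on this VNIC's RX path. */
	req->flags = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	/* Mark which optional fields below are valid. */
	req->enables = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
					HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req->vnic_id = rte_cpu_to_le_16(vnic_id);
	req->dflt_ring_grp = rte_cpu_to_le_16(dflt_ring_grp);
	req->mru = rte_cpu_to_le_16(mru);
}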
+/* hwrm_vnic_rss_cfg */
+/* Description: This function is used to enable RSS configuration. */
+/* Input (48 bytes) */
+struct hwrm_vnic_rss_cfg_input {
+ uint16_t req_type;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t hash_type;
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source and destination IPv4 addresses of IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and source/destination
+ * ports of TCP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv4 addresses and source/destination
+ * ports of UDP/IPv4 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+	 * source and destination IPv6 addresses of IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and source/destination
+ * ports of TCP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ /*
+ * When this bit is '1', the RSS hash shall be computed over
+ * source/destination IPv6 addresses and source/destination
+ * ports of UDP/IPv6 packets.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
+ uint32_t unused_0;
+ uint64_t ring_grp_tbl_addr;
+ /* This is the address for rss ring group table */
+ uint64_t hash_key_tbl_addr;
+ /* This is the address for rss hash key table */
+ uint16_t rss_ctx_idx;
+ /* Index to the rss indirection table. */
+ uint16_t unused_1[3];
+} __attribute__((packed));
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
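/*
 * Illustrative sketch (editor's example, not part of the patch): selecting
 * RSS hash types and pointing the HWRM at the DMA addresses of the ring
 * group indirection table and the hash key table.  The physical addresses
 * are assumed to come from DMA-able memory allocated elsewhere in the
 * driver; the send step is omitted.
 */
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void example_vnic_rss_cfg_fill(struct hwrm_vnic_rss_cfg_input *req,
				      uint64_t ring_grp_tbl_pa,
				      uint64_t hash_key_tbl_pa,
				      uint16_t rss_ctx_idx)
{
	memset(req, 0, sizeof(*req));
	/* Hash over IPv4/IPv6 headers and over TCP ports for both IP versions. */
	req->hash_type = rte_cpu_to_le_32(
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 |
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 |
		HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6);
	req->ring_grp_tbl_addr = rte_cpu_to_le_64(ring_grp_tbl_pa);
	req->hash_key_tbl_addr = rte_cpu_to_le_64(hash_key_tbl_pa);
	req->rss_ctx_idx = rte_cpu_to_le_16(rss_ctx_idx);
}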
+
+/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+/* Description: This function is used to allocate COS/Load Balance context. */
+/* Input (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+} __attribute__((packed));
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+ uint16_t error_code;
/*
- * Enumeration denoting the RX, TX type of the resource. This
- * enumeration is used for resources that are similar for both TX and RX
- * paths of the chip.
- */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH \
- UINT32_C(0x1)
- /* tx path */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX \
- (UINT32_C(0x0) << 0)
- /* rx path */
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX \
- (UINT32_C(0x1) << 0)
- #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \
- HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX
- uint32_t flags;
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint16_t rss_cos_lb_ctx_id;
+ /* rss_cos_lb_ctx_id is 16 b */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t unused_4;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
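/*
 * Illustrative sketch (editor's example, not part of the patch): reading the
 * allocated context id from the response above.  Feeding this id back as the
 * rss_ctx_idx of hwrm_vnic_rss_cfg and as the rss_rule of hwrm_vnic_cfg is an
 * assumption based on the field descriptions, not something the patch states.
 */
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static uint16_t example_rss_ctx_id(
	const struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp)
{
	/* Only meaningful once resp->valid reads as 1. */
	return rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
}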
+/* hwrm_vnic_rss_cos_lb_ctx_free */
+/* Description: This function can be used to free COS/Load Balance context. */
+/* Input (24 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+ uint16_t req_type;
/*
- * Port ID of port for which the queue configuration is being queried.
- * This field is only required when sent by IPC.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
- uint16_t port_id;
+ uint16_t cmpl_ring;
+ /*
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint16_t rss_cos_lb_ctx_id;
+ /* rss_cos_lb_ctx_id is 16 b */
+ uint16_t unused_0[3];
+} __attribute__((packed));
- uint16_t unused_0;
+/* Output (16 bytes) */
+struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
} __attribute__((packed));
/* hwrm_ring_alloc */
/*
* Description: This command allocates and does basic preparation for a ring.
*/
-
/* Input (80 bytes) */
struct hwrm_ring_alloc_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t enables;
/* This bit must be '1' for the Reserved1 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1)
- /* This bit must be '1' for the Reserved2 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED2 UINT32_C(0x2)
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1)
+ /* This bit must be '1' for the ring_arb_cfg field to be configured. */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG UINT32_C(0x2)
/* This bit must be '1' for the Reserved3 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4)
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4)
/*
* This bit must be '1' for the stat_ctx_id_valid field to be
* configured.
*/
#define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8)
/* This bit must be '1' for the Reserved4 field to be configured. */
- #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10)
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10)
/* This bit must be '1' for the max_bw_valid field to be configured. */
#define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20)
- uint32_t enables;
-
- /* Ring Type. */
- /* Completion Ring (CR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0)
- /* TX Ring (TR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0)
- /* RX Ring (RR) */
- #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0)
uint8_t ring_type;
-
+ /* Ring Type. */
+ /* Completion Ring (CR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX UINT32_C(0x2)
uint8_t unused_0;
uint16_t unused_1;
-
- /* This value is a pointer to the page table for the Ring. */
uint64_t page_tbl_addr;
-
- /* First Byte Offset of the first entry in the first page. */
+ /* This value is a pointer to the page table for the Ring. */
uint32_t fbo;
-
- /*
- * Actual page size in 2^page_size. The supported range is increments in
- * powers of 2 from 16 bytes to 1GB. - 4 = 16 B Page size is 16 B. - 12
- * = 4 KB Page size is 4 KB. - 13 = 8 KB Page size is 8 KB. - 16 = 64 KB
- * Page size is 64 KB. - 22 = 2 MB Page size is 2 MB. - 23 = 4 MB Page
- * size is 4 MB. - 31 = 1 GB Page size is 1 GB.
- */
+ /* First Byte Offset of the first entry in the first page. */
uint8_t page_size;
-
/*
- * This value indicates the depth of page table. For this version of the
- * specification, value other than 0 or 1 shall be considered as an
- * invalid value. When the page_tbl_depth = 0, then it is treated as a
- * special case with the following. 1. FBO and page size fields are not
- * valid. 2. page_tbl_addr is the physical address of the first element
- * of the ring.
+ * Actual page size in 2^page_size. The supported range is
+ * increments in powers of 2 from 16 bytes to 1GB. - 4 = 16 B
+ * Page size is 16 B. - 12 = 4 KB Page size is 4 KB. - 13 = 8 KB
+ * Page size is 8 KB. - 16 = 64 KB Page size is 64 KB. - 21 = 2
+ * MB Page size is 2 MB. - 22 = 4 MB Page size is 4 MB. - 30 = 1
+ * GB Page size is 1 GB.
*/
uint8_t page_tbl_depth;
-
- uint8_t unused_2;
- uint8_t unused_3;
-
/*
- * Number of 16B units in the ring. Minimum size for a ring is 16 16B
- * entries.
+ * This value indicates the depth of page table. For this
+ * version of the specification, value other than 0 or 1 shall
+ * be considered as an invalid value. When the page_tbl_depth =
+ * 0, then it is treated as a special case with the following.
+ * 1. FBO and page size fields are not valid. 2. page_tbl_addr
+ * is the physical address of the first element of the ring.
*/
+ uint8_t unused_2;
+ uint8_t unused_3;
uint32_t length;
-
/*
- * Logical ring number for the ring to be allocated. This value
- * determines the position in the doorbell area where the update to the
- * ring will be made. For completion rings, this value is also the MSI-X
- * vector number for the function the completion ring is associated
- * with.
+ * Number of 16B units in the ring. Minimum size for a ring is
+ * 16 16B entries.
*/
uint16_t logical_id;
-
/*
- * This field is used only when ring_type is a TX ring. This value
- * indicates what completion ring the TX ring is associated with.
+ * Logical ring number for the ring to be allocated. This value
+ * determines the position in the doorbell area where the update
+ * to the ring will be made. For completion rings, this value is
+ * also the MSI-X vector number for the function the completion
+ * ring is associated with.
*/
uint16_t cmpl_ring_id;
-
/*
- * This field is used only when ring_type is a TX ring. This value
- * indicates what CoS queue the TX ring is associated with.
+ * This field is used only when ring_type is a TX ring. This
+ * value indicates what completion ring the TX ring is
+ * associated with.
*/
uint16_t queue_id;
-
+ /*
+ * This field is used only when ring_type is a TX ring. This
+ * value indicates what CoS queue the TX ring is associated
+ * with.
+ */
uint8_t unused_4;
uint8_t unused_5;
-
- /* This field is reserved for the future use. It shall be set to 0. */
uint32_t reserved1;
/* This field is reserved for the future use. It shall be set to 0. */
- uint16_t reserved2;
-
+ uint16_t ring_arb_cfg;
+ /*
+ * This field is used only when ring_type is a TX ring. This
+ * field is used to configure arbitration related parameters for
+ * a TX ring.
+ */
+ /* Arbitration policy used for the ring. */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_MASK \
+ UINT32_C(0xf)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SFT 0
+ /*
+ * Use strict priority for the TX ring. Priority
+ * value is specified in arb_policy_param
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_SP \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Use weighted fair queue arbitration for the
+ * TX ring. Weight is specified in
+ * arb_policy_param
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ \
+ (UINT32_C(0x2) << 0)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_LAST \
+		HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ
+ /* Reserved field. */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_MASK UINT32_C(0xf0)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_RSVD_SFT 4
+ /*
+ * Arbitration policy specific parameter. # For strict priority
+ * arbitration policy, this field represents a priority value.
+ * If set to 0, then the priority is not specified and the HWRM
+ * is allowed to select any priority for this TX ring. # For
+ * weighted fair queue arbitration policy, this field represents
+ * a weight value. If set to 0, then the weight is not specified
+ * and the HWRM is allowed to select any weight for this TX
+ * ring.
+ */
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK \
+ UINT32_C(0xff00)
+ #define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
uint8_t unused_6;
uint8_t unused_7;
- /* This field is reserved for the future use. It shall be set to 0. */
uint32_t reserved3;
-
- /*
- * This field is used only when ring_type is a TX ring. This input
- * indicates what statistics context this ring should be associated
- * with.
- */
- uint32_t stat_ctx_id;
-
/* This field is reserved for the future use. It shall be set to 0. */
- uint32_t reserved4;
-
+ uint32_t stat_ctx_id;
/*
- * This field is used only when ring_type is a TX ring. Maximum BW
- * allocated to this TX ring in Mbps. The HWRM will translate this value
- * into byte counter and time interval used for this ring inside the
- * device.
+ * This field is used only when ring_type is a TX ring. This
+ * input indicates what statistics context this ring should be
+ * associated with.
*/
+ uint32_t reserved4;
+ /* This field is reserved for the future use. It shall be set to 0. */
uint32_t max_bw;
-
/*
- * This field is used only when ring_type is a Completion ring. This
- * value indicates what interrupt mode should be used on this completion
- * ring. Note: In the legacy interrupt mode, no more than 16 completion
- * rings are allowed.
- */
- /* Legacy INTA */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY (UINT32_C(0x0) << 0)
- /* Reserved */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD (UINT32_C(0x1) << 0)
- /* MSI-X */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX (UINT32_C(0x2) << 0)
- /* No Interrupt - Polled mode */
- #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL (UINT32_C(0x3) << 0)
+ * This field is used only when ring_type is a TX ring to
+ * specify maximum BW allocated to the TX ring. The HWRM will
+ * translate this value into byte counter and time interval used
+ * for this ring inside the device.
+ */
+ /* Bandwidth value */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK \
+ UINT32_C(0xfffffff)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_SFT 0
+ /* Reserved */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_RSVD UINT32_C(0x10000000)
+ /* bw_value_unit is 3 b */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MASK \
+ UINT32_C(0xe0000000)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_SFT 29
+ /* Value is in Mbps */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MBPS \
+ (UINT32_C(0x0) << 29)
+ /* Value is in 1/100th of a percentage of total bandwidth. */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 \
+ (UINT32_C(0x1) << 29)
+ /* Invalid unit */
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID \
+ (UINT32_C(0x7) << 29)
+ #define HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_LAST \
+		HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_INVALID
uint8_t int_mode;
-
+ /*
+ * This field is used only when ring_type is a Completion ring.
+ * This value indicates what interrupt mode should be used on
+ * this completion ring. Note: In the legacy interrupt mode, no
+ * more than 16 completion rings are allowed.
+ */
+ /* Legacy INTA */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY UINT32_C(0x0)
+ /* Reserved */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD UINT32_C(0x1)
+ /* MSI-X */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX UINT32_C(0x2)
+ /* No Interrupt - Polled mode */
+ #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL UINT32_C(0x3)
uint8_t unused_8[3];
} __attribute__((packed));
/* Output (16 bytes) */
-
struct hwrm_ring_alloc_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
- /* Physical number of ring allocated. */
uint16_t ring_id;
-
- /* Logical number of ring allocated. */
+ /*
+ * Physical number of ring allocated. This value shall be unique
+ * for a ring type.
+ */
uint16_t logical_ring_id;
-
+ /* Logical number of ring allocated. */
uint8_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
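/*
 * Illustrative sketch (editor's example, not part of the patch): encoding
 * the TX-specific fields of hwrm_ring_alloc_input, using weighted-fair-queue
 * arbitration with an illustrative weight of 10 and a max bandwidth of
 * 5000 expressed in Mbps through the bw_value/bw_value_unit bit-fields.
 * Common header fields, logical_id and the send step are omitted; all
 * numeric values are illustrative only.
 */
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void example_tx_ring_alloc_fill(struct hwrm_ring_alloc_input *req,
				       uint64_t ring_pa, uint32_t nr_16b_units,
				       uint16_t cmpl_ring_id, uint16_t queue_id)
{
	memset(req, 0, sizeof(*req));
	req->ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_TX;
	/* page_tbl_depth 0: page_tbl_addr points at the first ring element. */
	req->page_tbl_addr = rte_cpu_to_le_64(ring_pa);
	req->page_tbl_depth = 0;
	req->length = rte_cpu_to_le_32(nr_16b_units);
	req->cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
	req->queue_id = rte_cpu_to_le_16(queue_id);
	/* Mark the optional arbitration and max bandwidth fields as valid. */
	req->enables = rte_cpu_to_le_32(
		HWRM_RING_ALLOC_INPUT_ENABLES_RING_ARB_CFG |
		HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID);
	/* WFQ policy in the low nibble, weight 10 in arb_policy_param. */
	req->ring_arb_cfg = rte_cpu_to_le_16(
		HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_WFQ |
		(10 << HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT));
	/* 5000 in bw_value; bw_value_unit left at 0, i.e. Mbps. */
	req->max_bw = rte_cpu_to_le_32(
		(5000 & HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_MASK) |
		HWRM_RING_ALLOC_INPUT_MAX_BW_BW_VALUE_UNIT_MBPS);
}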
/* hwrm_ring_free */
@@ -4197,188 +4860,163 @@ struct hwrm_ring_alloc_output {
* Description: This command is used to free a ring and associated resources.
*/
/* Input (24 bytes) */
-
struct hwrm_ring_free_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* Ring Type. */
- /* Completion Ring (CR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0)
- /* TX Ring (TR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0)
- /* RX Ring (RR) */
- #define HWRM_RING_FREE_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0)
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
uint8_t ring_type;
-
+ /* Ring Type. */
+ /* Completion Ring (CR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL UINT32_C(0x0)
+ /* TX Ring (TR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_TX UINT32_C(0x1)
+ /* RX Ring (RR) */
+ #define HWRM_RING_FREE_INPUT_RING_TYPE_RX UINT32_C(0x2)
uint8_t unused_0;
-
- /* Physical number of ring allocated. */
uint16_t ring_id;
-
+ /* Physical number of ring allocated. */
uint32_t unused_1;
} __attribute__((packed));
/* Output (16 bytes) */
struct hwrm_ring_free_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
/* hwrm_ring_grp_alloc */
/*
* Description: This API allocates and does basic preparation for a ring group.
*/
-
/* Input (24 bytes) */
struct hwrm_ring_grp_alloc_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* This value identifies the CR associated with the ring group. */
- uint16_t cr;
-
- /* This value identifies the main RR associated with the ring group. */
- uint16_t rr;
-
/*
- * This value identifies the aggregation RR associated with the ring
- * group. If this value is 0xFF... (All Fs), then no Aggregation ring
- * will be set.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
+ uint16_t cr;
+ /* This value identifies the CR associated with the ring group. */
+ uint16_t rr;
+ /* This value identifies the main RR associated with the ring group. */
uint16_t ar;
-
/*
- * This value identifies the statistics context associated with the ring
- * group.
+ * This value identifies the aggregation RR associated with the
+ * ring group. If this value is 0xFF... (All Fs), then no
+ * Aggregation ring will be set.
*/
uint16_t sc;
+ /*
+ * This value identifies the statistics context associated with
+ * the ring group.
+ */
} __attribute__((packed));
/* Output (16 bytes) */
struct hwrm_ring_grp_alloc_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
+ /* This field provides original sequence number of the command. */
uint16_t resp_len;
-
/*
- * This is the ring group ID value. Use this value to program the
- * default ring group for the VNIC or as table entries in an RSS/COS
- * context.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
uint32_t ring_group_id;
-
+ /*
+ * This is the ring group ID value. Use this value to program
+ * the default ring group for the VNIC or as table entries in an
+ * RSS/COS context.
+ */
uint8_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
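/*
 * Illustrative sketch (editor's example, not part of the patch): tying a
 * completion ring, an RX ring and a statistics context into a ring group.
 * Per the field description above, 0xFFFF in 'ar' means no aggregation ring
 * is set.  The ring and context ids are assumed to come from earlier
 * hwrm_ring_alloc / hwrm_stat_ctx_alloc calls; the send step is omitted.
 */
#include <string.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void example_ring_grp_alloc_fill(struct hwrm_ring_grp_alloc_input *req,
					uint16_t cmpl_ring_id,
					uint16_t rx_ring_id,
					uint16_t stat_ctx_id)
{
	memset(req, 0, sizeof(*req));
	req->cr = rte_cpu_to_le_16(cmpl_ring_id);
	req->rr = rte_cpu_to_le_16(rx_ring_id);
	req->ar = rte_cpu_to_le_16(0xFFFF);	/* no aggregation ring */
	req->sc = rte_cpu_to_le_16(stat_ctx_id);
}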
/* hwrm_ring_grp_free */
@@ -4390,1410 +5028,1016 @@ struct hwrm_ring_grp_alloc_output {
* a part of executing this command, the HWRM shall reset all associated ring
* group resources.
*/
-
/* Input (24 bytes) */
struct hwrm_ring_grp_free_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* This is the ring group ID value. */
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
uint32_t ring_group_id;
-
+ /* This is the ring group ID value. */
uint32_t unused_0;
} __attribute__((packed));
/* Output (16 bytes) */
struct hwrm_ring_grp_free_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
-/* hwrm_stat_ctx_alloc */
+/* hwrm_cfa_l2_filter_alloc */
/*
- * Description: This command allocates and does basic preparation for a stat
- * context.
+ * A filter is used to identify traffic that contains a matching set of
+ * parameters, such as a unicast or broadcast MAC address or a VLAN tag
+ * among other things, which then allows the ASIC to direct the incoming
+ * traffic to an appropriate VNIC or Rx ring.
*/
-
-/* Input (32 bytes) */
-struct hwrm_stat_ctx_alloc_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/* Input (96 bytes) */
+struct hwrm_cfa_l2_filter_alloc_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* This is the address for statistic block. */
- uint64_t stats_dma_addr;
-
/*
- * The statistic block update period in ms. e.g. 250ms, 500ms, 750ms,
- * 1000ms.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- uint32_t update_period_ms;
-
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_alloc_output {
+ uint32_t flags;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Enumeration denoting the RX, TX type of the resource. This
+ * enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+ /* rx path */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \
+		HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * Setting of this flag indicates the applicability to the
+ * loopback path.
*/
- uint16_t resp_len;
-
- /* This is the statistics context ID value. */
- uint32_t stat_ctx_id;
-
- uint8_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
-
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK UINT32_C(0x2)
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * Setting of this flag indicates drop action. If this flag is
+ * not set, then it should be considered accept action.
*/
- uint8_t valid;
-} __attribute__((packed));
-
-/* hwrm_stat_ctx_clr_stats */
-/* Description: This command clears statistics of a context. */
-
-/* Input (24 bytes) */
-struct hwrm_stat_ctx_clr_stats_input {
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x4)
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * If this flag is set, all t_l2_* fields are invalid and they
+ * should not be specified. If this flag is set, then l2_*
+ * fields refer to fields of outermost L2 header.
*/
- uint16_t req_type;
-
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST UINT32_C(0x8)
+ uint32_t enables;
+ /* This bit must be '1' for the l2_addr field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR UINT32_C(0x1)
+ /* This bit must be '1' for the l2_addr_mask field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK UINT32_C(0x2)
+ /* This bit must be '1' for the l2_ovlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN UINT32_C(0x4)
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This bit must be '1' for the l2_ovlan_mask field to be
+ * configured.
*/
- uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK UINT32_C(0x8)
+ /* This bit must be '1' for the l2_ivlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN UINT32_C(0x10)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This bit must be '1' for the l2_ivlan_mask field to be
+ * configured.
*/
- uint16_t target_id;
-
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK UINT32_C(0x20)
+ /* This bit must be '1' for the t_l2_addr field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR UINT32_C(0x40)
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * This bit must be '1' for the t_l2_addr_mask field to be
+ * configured.
*/
- uint64_t resp_addr;
-
- /* ID of the statistics context that is being queried. */
- uint32_t stat_ctx_id;
-
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_stat_ctx_clr_stats_output {
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK UINT32_C(0x80)
+ /* This bit must be '1' for the t_l2_ovlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN UINT32_C(0x100)
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This bit must be '1' for the t_l2_ovlan_mask field to be
+ * configured.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK UINT32_C(0x200)
+ /* This bit must be '1' for the t_l2_ivlan field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN UINT32_C(0x400)
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This bit must be '1' for the t_l2_ivlan_mask field to be
+ * configured.
*/
- uint16_t resp_len;
-
- uint32_t unused_0;
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK UINT32_C(0x800)
+ /* This bit must be '1' for the src_type field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE UINT32_C(0x1000)
+ /* This bit must be '1' for the src_id field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID UINT32_C(0x2000)
+ /* This bit must be '1' for the tunnel_type field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x4000)
+ /* This bit must be '1' for the dst_id field to be configured. */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x8000)
+ /*
+ * This bit must be '1' for the mirror_vnic_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x10000)
+ uint8_t l2_addr[6];
+ /*
+ * This value sets the match value for the L2 MAC address.
+ * Destination MAC address for RX path. Source MAC address for
+ * TX path.
+ */
+ uint8_t unused_0;
uint8_t unused_1;
+ uint8_t l2_addr_mask[6];
+ /*
+ * This value sets the mask value for the L2 address. A value of
+ * 0 will mask the corresponding bit from compare.
+ */
+ uint16_t l2_ovlan;
+ /* This value sets VLAN ID value for outer VLAN. */
+ uint16_t l2_ovlan_mask;
+ /*
+ * This value sets the mask value for the ovlan id. A value of 0
+ * will mask the corresponding bit from compare.
+ */
+ uint16_t l2_ivlan;
+ /* This value sets VLAN ID value for inner VLAN. */
+ uint16_t l2_ivlan_mask;
+ /*
+ * This value sets the mask value for the ivlan id. A value of 0
+ * will mask the corresponding bit from compare.
+ */
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t t_l2_addr[6];
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This value sets the match value for the tunnel L2 MAC
+ * address. Destination MAC address for RX path. Source MAC
+ * address for TX path.
*/
- uint8_t valid;
-} __attribute__((packed));
-
-/* hwrm_stat_ctx_free */
-/* Description: This command is used to free a stat context. */
-/* Input (24 bytes) */
-
-struct hwrm_stat_ctx_free_input {
+ uint8_t unused_4;
+ uint8_t unused_5;
+ uint8_t t_l2_addr_mask[6];
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * This value sets the mask value for the tunnel L2 address. A
+ * value of 0 will mask the corresponding bit from compare.
*/
- uint16_t req_type;
-
+ uint16_t t_l2_ovlan;
+ /* This value sets VLAN ID value for tunnel outer VLAN. */
+ uint16_t t_l2_ovlan_mask;
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value sets the mask value for the tunnel ovlan id. A
+ * value of 0 will mask the corresponding bit from compare.
*/
- uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
+ uint16_t t_l2_ivlan;
+ /* This value sets VLAN ID value for tunnel inner VLAN. */
+ uint16_t t_l2_ivlan_mask;
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * This value sets the mask value for the tunnel ivlan id. A
+ * value of 0 will mask the corresponding bit from compare.
*/
- uint16_t target_id;
-
+ uint8_t src_type;
+ /* This value identifies the type of source of the packet. */
+ /* Network port */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT UINT32_C(0x0)
+ /* Physical function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF UINT32_C(0x1)
+ /* Virtual function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF UINT32_C(0x2)
+ /* Virtual NIC of a function */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC UINT32_C(0x3)
+ /* Embedded processor for CFA management */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG UINT32_C(0x4)
+ /* Embedded processor for OOB management */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE UINT32_C(0x5)
+ /* Embedded processor for RoCE */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO UINT32_C(0x6)
+ /* Embedded processor for network proxy functions */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG UINT32_C(0x7)
+ uint8_t unused_6;
+ uint32_t src_id;
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * This value is the id of the source. For a network port, it
+ * represents port_id. For a physical function, it represents
+ * fid. For a virtual function, it represents vf_id. For a vnic,
+ * it represents vnic_id. For embedded processors, this id is
+	 * not valid. Notes: 1. The function ID is implied if src_id
+ * is not provided for a src_type that is either
+ */
+ uint8_t tunnel_type;
+ /* Tunnel Type. */
+ /* Non-tunnel */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL UINT32_C(0x0)
+ /* Virtual eXtensible Local Area Network (VXLAN) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ /*
+ * Network Virtualization Generic Routing
+ * Encapsulation (NVGRE)
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE UINT32_C(0x2)
+ /*
+ * Generic Routing Encapsulation (GRE) inside
+ * Ethernet payload
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE UINT32_C(0x3)
+ /* IP in IP */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP UINT32_C(0x4)
+ /* Generic Network Virtualization Encapsulation (Geneve) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS UINT32_C(0x6)
+ /* Stateless Transport Tunnel (STT) */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7)
+ /*
+ * Generic Routing Encapsulation (GRE) inside IP
+ * datagram payload
+ */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8)
+ /* Any tunneled traffic */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL UINT32_C(0xff)
+ uint8_t unused_7;
+ uint16_t dst_id;
+ /*
+ * If set, this value shall represent the Logical VNIC ID of the
+ * destination VNIC for the RX path and network port id of the
+ * destination port for the TX path.
+ */
+ uint16_t mirror_vnic_id;
+ /* Logical VNIC ID of the VNIC where traffic is mirrored. */
+ uint8_t pri_hint;
+ /*
+ * This hint is provided to help in placing the filter in the
+ * filter table.
+ */
+ /* No preference */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER UINT32_C(0x0)
+ /* Above the given filter */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER UINT32_C(0x1)
+ /* Below the given filter */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER UINT32_C(0x2)
+ /* As high as possible */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX UINT32_C(0x3)
+ /* As low as possible */
+ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN UINT32_C(0x4)
+ uint8_t unused_8;
+ uint32_t unused_9;
+ uint64_t l2_filter_id_hint;
+ /*
+ * This is the ID of the filter that goes along with the
+ * pri_hint. This field is valid only for the following values.
+ * 1 - Above the given filter 2 - Below the given filter
*/
- uint64_t resp_addr;
-
- /* ID of the statistics context that is being queried. */
- uint32_t stat_ctx_id;
-
- uint32_t unused_0;
} __attribute__((packed));
-/* Output (16 bytes) */
-
-struct hwrm_stat_ctx_free_output {
+/* Output (24 bytes) */
+struct hwrm_cfa_l2_filter_alloc_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * This value identifies a set of CFA data structures used for
+ * an L2 context.
+ */
+ uint32_t flow_id;
+ /*
+ * This is the ID of the flow associated with this filter. This
+ * value shall be used to match and associate the flow
+ * identifier returned in completion records. A value of
+ * 0xFFFFFFFF shall indicate no flow id.
*/
- uint16_t resp_len;
-
- /* This is the statistics context ID value. */
- uint32_t stat_ctx_id;
-
uint8_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
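For illustration, a minimal sketch of how a caller might fill the alloc request above to install an RX-path filter on one destination MAC. The send_hwrm() transport helper is hypothetical, req_type/seq_id/resp_addr are assumed to be filled in by that helper, little-endian conversion of multi-byte fields is omitted, and <string.h> is assumed for memcpy()/memset():

	struct hwrm_cfa_l2_filter_alloc_input req = { 0 };
	struct hwrm_cfa_l2_filter_alloc_output resp = { 0 };
	static const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t l2_filter_id = 0;
	int rc;

	req.flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	req.enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(req.l2_addr, mac, sizeof(mac));
	memset(req.l2_addr_mask, 0xff, sizeof(req.l2_addr_mask)); /* compare every bit */

	rc = send_hwrm(&req, sizeof(req), &resp, sizeof(resp));   /* hypothetical helper */
	if (rc == 0 && resp.valid)
		l2_filter_id = resp.l2_filter_id;  /* needed later by cfg/free */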
-/* hwrm_vnic_alloc */
+/* hwrm_cfa_l2_filter_free */
/*
- * Description: This VNIC is a resource in the RX side of the chip that is used
- * to represent a virtual host "interface". # At the time of VNIC allocation or
- * configuration, the function can specify whether it wants the requested VNIC
- * to be the default VNIC for the function or not. # If a function requests
- * allocation of a VNIC for the first time and a VNIC is successfully allocated
- * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC
- * for that function. # The default VNIC shall be used for the default action
- * for a partition or function. # For each VNIC allocated on a function, a
- * mapping on the RX side to map the allocated VNIC to source virtual interface
- * shall be performed by the HWRM. This should be hidden to the function driver
- * requesting the VNIC allocation. This enables broadcast/multicast replication
- * with source knockout. # If multicast replication with source knockout is
- * enabled, then the internal VNIC to SVIF mapping data structures shall be
- * programmed at the time of VNIC allocation.
+ * Description: Free an L2 filter. The HWRM shall free all filter
+ * resources associated with the L2 filter.
*/
-
/* Input (24 bytes) */
-struct hwrm_vnic_alloc_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+struct hwrm_cfa_l2_filter_free_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
/*
- * When this bit is '1', this VNIC is requested to be the default VNIC
- * for this function.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t l2_filter_id;
+ /*
+ * This value identifies a set of CFA data structures used for
+ * an L2 context.
*/
- #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
- uint32_t flags;
-
- uint32_t unused_0;
} __attribute__((packed));
/* Output (16 bytes) */
-struct hwrm_vnic_alloc_output {
+struct hwrm_cfa_l2_filter_free_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
- /* Logical vnic ID */
- uint32_t vnic_id;
-
- uint8_t unused_0;
+ uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
-
+ uint8_t unused_3;
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
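Freeing the filter is, under the same assumptions as the earlier sketch, just a matter of echoing back the l2_filter_id returned by the alloc command:

	struct hwrm_cfa_l2_filter_free_input req = { 0 };
	struct hwrm_cfa_l2_filter_free_output resp = { 0 };
	int rc;

	req.l2_filter_id = l2_filter_id;   /* value returned by hwrm_cfa_l2_filter_alloc */
	rc = send_hwrm(&req, sizeof(req), &resp, sizeof(resp));   /* hypothetical helper */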
-/* hwrm_vnic_cfg */
-/* Description: Configure the RX VNIC structure. */
-
+/* hwrm_cfa_l2_filter_cfg */
+/* Description: Change the configuration of an existing L2 filter */
/* Input (40 bytes) */
-struct hwrm_vnic_cfg_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+struct hwrm_cfa_l2_filter_cfg_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /*
- * When this bit is '1', the VNIC is requested to be the default VNIC
- * for the function.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1)
- /*
- * When this bit is '1', the VNIC is being configured to strip VLAN in
- * the RX path. If set to '0', then VLAN stripping is disabled on this
- * VNIC.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2)
/*
- * When this bit is '1', the VNIC is being configured to buffer receive
- * packets in the hardware until the host posts new receive buffers. If
- * set to '0', then bd_stall is being configured to be disabled on this
- * VNIC.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4)
- /*
- * When this bit is '1', the VNIC is being configured to receive both
- * RoCE and non-RoCE traffic. If set to '0', then this VNIC is not
- * configured to be operating in dual VNIC mode.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8)
- /*
- * When this flag is set to '1', the VNIC is requested to be configured
- * to receive only RoCE traffic. If this flag is set to '0', then this
- * flag shall be ignored by the HWRM. If roce_dual_vnic_mode flag is set
- * to '1', then the HWRM client shall not set this flag to '1'.
- */
- #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10)
uint32_t flags;
-
- /* This bit must be '1' for the dflt_ring_grp field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1)
- /* This bit must be '1' for the rss_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2)
- /* This bit must be '1' for the cos_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4)
- /* This bit must be '1' for the lb_rule field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8)
- /* This bit must be '1' for the mru field to be configured. */
- #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10)
- uint32_t enables;
-
- /* Logical vnic ID */
- uint16_t vnic_id;
-
/*
- * Default Completion ring for the VNIC. This ring will be chosen if
- * packet does not match any RSS rules and if there is no COS rule.
+ * Enumeration denoting the RX, TX type of the resource. This
+ * enumeration is used for resources that are similar for both
+ * TX and RX paths of the chip.
*/
- uint16_t dflt_ring_grp;
-
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH UINT32_C(0x1)
+ /* tx path */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX (UINT32_C(0x0) << 0)
+ /* rx path */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX (UINT32_C(0x1) << 0)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST \
+	HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if there is no
- * RSS rule.
+ * Setting of this flag indicates drop action. If this flag is
+ * not set, then it should be considered accept action.
*/
- uint16_t rss_rule;
-
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_DROP UINT32_C(0x2)
+ uint32_t enables;
+ /* This bit must be '1' for the dst_id field to be configured. */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID UINT32_C(0x1)
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if there is no
- * COS rule.
+ * This bit must be '1' for the new_mirror_vnic_id field to be
+ * configured.
*/
- uint16_t cos_rule;
-
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID UINT32_C(0x2)
+ uint64_t l2_filter_id;
/*
- * RSS ID for load balancing rule/table structure. 0xFF... (All Fs) if
- * there is no LB rule.
+ * This value identifies a set of CFA data structures used for
+ * an L2 context.
*/
- uint16_t lb_rule;
-
+ uint32_t dst_id;
/*
- * The maximum receive unit of the vnic. Each vnic is associated with a
- * function. The vnic mru value overwrites the mru setting of the
- * associated function. The HWRM shall make sure that vnic mru does not
- * exceed the mru of the port the function is associated with.
+ * If set, this value shall represent the Logical VNIC ID of the
+ * destination VNIC for the RX path and network port id of the
+ * destination port for the TX path.
*/
- uint16_t mru;
-
- uint32_t unused_0;
+ uint32_t new_mirror_vnic_id;
+ /* New Logical VNIC ID of the VNIC where traffic is mirrored. */
} __attribute__((packed));
/* Output (16 bytes) */
-struct hwrm_vnic_cfg_output {
+struct hwrm_cfa_l2_filter_cfg_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
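As with the other CFA commands, only the fields whose enables bits are set are applied. A sketch, using the same hypothetical helper, that redirects an existing RX filter to a different destination VNIC (new_vnic_id is a placeholder):

	struct hwrm_cfa_l2_filter_cfg_input req = { 0 };
	struct hwrm_cfa_l2_filter_cfg_output resp = { 0 };
	int rc;

	req.l2_filter_id = l2_filter_id;   /* from hwrm_cfa_l2_filter_alloc */
	req.flags = HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX;
	req.enables = HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_DST_ID;
	req.dst_id = new_vnic_id;          /* placeholder: new destination VNIC */
	rc = send_hwrm(&req, sizeof(req), &resp, sizeof(resp));   /* hypothetical helper */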
-/* hwrm_vnic_free */
-/*
- * Description: Free a VNIC resource. Idle any resources associated with the
- * VNIC as well as the VNIC. Reset and release all resources associated with the
- * VNIC.
- */
-
-/* Input (24 bytes) */
-struct hwrm_vnic_free_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/* hwrm_cfa_l2_set_rx_mask */
+/* Description: This command will set the RX mask of the function. */
+/* Input (56 bytes) */
+struct hwrm_cfa_l2_set_rx_mask_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* Logical vnic ID */
- uint32_t vnic_id;
-
- uint32_t unused_0;
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_vnic_free_output {
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
- uint16_t resp_len;
-
- uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
-
+ uint32_t vnic_id;
+ /* VNIC ID */
+ uint32_t mask;
+ /* Reserved for future use. */
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1)
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * When this bit is '1', the function is requested to accept
+ * multi-cast packets specified by the multicast addr table.
*/
- uint8_t valid;
-} __attribute__((packed));
-
-/* hwrm_vnic_rss_cfg */
-/* Description: This function is used to enable RSS configuration. */
-
-/* Input (48 bytes) */
-struct hwrm_vnic_rss_cfg_input {
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2)
/*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
+ * When this bit is '1', the function is requested to accept all
+ * multi-cast packets.
*/
- uint16_t req_type;
-
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4)
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * When this bit is '1', the function is requested to accept
+ * broadcast packets.
*/
- uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8)
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ * When this bit is '1', the function is requested to be put in
+ * the promiscuous mode. The HWRM should accept any function to
+ * set up promiscuous mode. The HWRM shall follow the semantics
+ * below for the promiscuous mode support. # When partitioning
+ * is not enabled on a port (i.e. single PF on the port), then
+ * the PF shall be allowed to be in the promiscuous mode. When
+ * the PF is in the promiscuous mode, then it shall receive all
+ * host bound traffic on that port. # When partitioning is
+ * enabled on a port (i.e. multiple PFs per port) and a PF on
+ * that port is in the promiscuous mode, then the PF receives
+ * all traffic within that partition as identified by a unique
+ * identifier for the PF (e.g. S-Tag). If a unique outer VLAN
+ * for the PF is specified, then the setting of promiscuous mode
+ * on that PF shall result in the PF receiving all host bound
+	 * traffic with matching outer VLAN. # A VF can be set in
+ * the promiscuous mode. In the promiscuous mode, the VF does
+ * not receive any traffic unless a unique outer VLAN for the VF
+ * is specified. If a unique outer VLAN for the VF is specified,
+ * then the setting of promiscuous mode on that VF shall result
+ * in the VF receiving all host bound traffic with the matching
+ * outer VLAN. # The HWRM shall allow the setting of promiscuous
+ * mode on a function independently from the promiscuous mode
+ * settings on other functions.
*/
- uint16_t target_id;
-
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10)
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * If this flag is set, the corresponding RX filters shall be
+ * set up to cover multicast/broadcast filters for the outermost
+ * Layer 2 destination MAC address field.
*/
- uint64_t resp_addr;
-
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20)
/*
- * When this bit is '1', the RSS hash shall be computed over source and
- * destination IPv4 addresses of IPv4 packets.
+ * If this flag is set, the corresponding RX filters shall be
+ * set up to cover multicast/broadcast filters for the VLAN-
+ * tagged packets that match the TPID and VID fields of VLAN
+ * tags in the VLAN tag table specified in this command.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY UINT32_C(0x40)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and source/destination ports of
- * TCP/IPv4 packets.
+ * If this flag is set, the corresponding RX filters shall be
+ * set up to cover multicast/broadcast filters for non-VLAN
+ * tagged packets and VLAN-tagged packets that match the TPID
+ * and VID fields of VLAN tags in the VLAN tag table specified
+ * in this command.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN UINT32_C(0x80)
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv4 addresses and source/destination ports of
- * UDP/IPv4 packets.
+ * If this flag is set, the corresponding RX filters shall be
+ * set up to cover multicast/broadcast filters for non-VLAN
+ * tagged packets and VLAN-tagged packets matching any VLAN tag.
+ * If this flag is set, then the HWRM shall ignore VLAN tags
+ * specified in vlan_tag_tbl. If none of vlanonly, vlan_nonvlan,
+ * and anyvlan_nonvlan flags is set, then the HWRM shall ignore
+ * VLAN tags specified in vlan_tag_tbl. The HWRM client shall
+ * set at most one flag out of vlanonly, vlan_nonvlan, and
+ * anyvlan_nonvlan.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4)
+ #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN UINT32_C(0x100)
+ uint64_t mc_tbl_addr;
+ /* This is the address for mcast address tbl. */
+ uint32_t num_mc_entries;
/*
- * When this bit is '1', the RSS hash shall be computed over source and
- * destination IPv4 addresses of IPv6 packets.
+ * This value indicates how many entries in mc_tbl are valid.
+ * Each entry is 6 bytes.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8)
+ uint32_t unused_0;
+ uint64_t vlan_tag_tbl_addr;
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and source/destination ports of
- * TCP/IPv6 packets.
+ * This is the address for VLAN tag table. Each VLAN entry in
+ * the table is 4 bytes of a VLAN tag including TPID, PCP, DEI,
+ * and VID fields in network byte order.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10)
+ uint32_t num_vlan_tags;
/*
- * When this bit is '1', the RSS hash shall be computed over
- * source/destination IPv6 addresses and source/destination ports of
- * UDP/IPv6 packets.
+ * This value indicates how many entries in vlan_tag_tbl are
+ * valid. Each entry is 4 bytes.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20)
- uint32_t hash_type;
-
- uint32_t unused_0;
-
- /* This is the address for rss ring group table */
- uint64_t ring_grp_tbl_addr;
-
- /* This is the address for rss hash key table */
- uint64_t hash_key_tbl_addr;
-
- /* Index to the rss indirection table. */
- uint16_t rss_ctx_idx;
-
- uint16_t unused_1[3];
+ uint32_t unused_1;
} __attribute__((packed));
/* Output (16 bytes) */
-struct hwrm_vnic_rss_cfg_output {
+struct hwrm_cfa_l2_set_rx_mask_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
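A sketch of a common use of this command, enabling broadcast and all-multicast reception on a VNIC; vnic_id and the transport helper are placeholders, and since ALL_MCAST is set the mc_tbl_addr/num_mc_entries fields are presumably left at zero:

	struct hwrm_cfa_l2_set_rx_mask_input req = { 0 };
	struct hwrm_cfa_l2_set_rx_mask_output resp = { 0 };
	int rc;

	req.vnic_id = vnic_id;             /* placeholder: VNIC to configure */
	req.mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
		   HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	rc = send_hwrm(&req, sizeof(req), &resp, sizeof(resp));   /* hypothetical helper */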
-/* Input (16 bytes) */
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/* hwrm_stat_ctx_alloc */
+/*
+ * Description: This command allocates and does basic preparation for a stat
+ * context.
+ */
+/* Input (32 bytes) */
+struct hwrm_stat_ctx_alloc_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint64_t stats_dma_addr;
+ /* This is the address for statistic block. */
+ uint32_t update_period_ms;
+ /*
+ * The statistic block update period in ms. e.g. 250ms, 500ms,
+ * 750ms, 1000ms. If update_period_ms is 0, then the stats
+	 * update shall never be done and the DMA address shall not be
+ * used. In this case, the stat block can only be read by
+ * hwrm_stat_ctx_query command.
+ */
+ uint32_t unused_0;
} __attribute__((packed));
/* Output (16 bytes) */
-
-struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+struct hwrm_stat_ctx_alloc_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
- /* rss_cos_lb_ctx_id is 16 b */
- uint16_t rss_cos_lb_ctx_id;
-
+ uint32_t stat_ctx_id;
+ /* This is the statistics context ID value. */
uint8_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
- uint8_t unused_3;
- uint8_t unused_4;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
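Per the update_period_ms comment above, a zero period disables DMA-based updates and leaves the block readable only through hwrm_stat_ctx_query. A sketch allocating a context with a 250 ms update period; the DMA address and the transport helper are placeholders:

	struct hwrm_stat_ctx_alloc_input req = { 0 };
	struct hwrm_stat_ctx_alloc_output resp = { 0 };
	uint32_t stat_ctx_id = 0;
	int rc;

	req.stats_dma_addr = stats_iova;   /* placeholder: host DMA address of the stats block */
	req.update_period_ms = 250;        /* 0 would mean query-only, no DMA updates */
	rc = send_hwrm(&req, sizeof(req), &resp, sizeof(resp));   /* hypothetical helper */
	if (rc == 0)
		stat_ctx_id = resp.stat_ctx_id;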
-/* hwrm_vnic_rss_cos_lb_ctx_free */
-/* Description: This function can be used to free COS/Load Balance context. */
+/* hwrm_stat_ctx_free */
+/* Description: This command is used to free a stat context. */
/* Input (24 bytes) */
-
-struct hwrm_vnic_rss_cos_lb_ctx_free_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+struct hwrm_stat_ctx_free_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /* rss_cos_lb_ctx_id is 16 b */
- uint16_t rss_cos_lb_ctx_id;
-
- uint16_t unused_0[3];
-} __attribute__((packed));
-
-/* Output (16 bytes) */
-struct hwrm_vnic_rss_cos_lb_ctx_free_output {
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
- uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
- uint16_t seq_id;
-
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
- uint16_t resp_len;
-
+ uint32_t stat_ctx_id;
+	/* ID of the statistics context that is being freed. */
uint32_t unused_0;
- uint8_t unused_1;
- uint8_t unused_2;
- uint8_t unused_3;
-
- /*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
- */
- uint8_t valid;
} __attribute__((packed));
-/* Output (32 bytes) */
-struct hwrm_queue_qportcfg_output {
+/* Output (16 bytes) */
+struct hwrm_stat_ctx_free_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
- /*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
- */
+ /* This field provides original sequence number of the command. */
uint16_t resp_len;
-
- /* The maximum number of queues that can be configured. */
- uint8_t max_configurable_queues;
-
- /* The maximum number of lossless queues that can be configured. */
- uint8_t max_configurable_lossless_queues;
-
- /*
- * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then
- * the HWRM shall allow the host SW driver to configure queues using
- * hwrm_queue_cfg.
- */
- uint8_t queue_cfg_allowed;
-
- /*
- * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then
- * the HWRM shall allow the host SW driver to configure queue buffers
- * using hwrm_queue_buffers_cfg.
- */
- uint8_t queue_buffers_cfg_allowed;
-
- /*
- * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then
- * the HWRM shall allow the host SW driver to configure PFC using
- * hwrm_queue_pfcenable_cfg.
- */
- uint8_t queue_pfcenable_cfg_allowed;
-
- /*
- * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then
- * the HWRM shall allow the host SW driver to configure Priority to CoS
- * mapping using hwrm_queue_pri2cos_cfg.
- */
- uint8_t queue_pri2cos_cfg_allowed;
-
/*
- * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then
- * the HWRM shall allow the host SW driver to configure CoS Bandwidth
- * configuration using hwrm_queue_cos2bw_cfg.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint8_t queue_cos2bw_cfg_allowed;
-
- /* ID of CoS Queue 0. FF - Invalid id */
- uint8_t queue_id0;
-
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id0_service_profile;
-
- /* ID of CoS Queue 1. FF - Invalid id */
- uint8_t queue_id1;
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id1_service_profile;
-
- /* ID of CoS Queue 2. FF - Invalid id */
- uint8_t queue_id2;
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id2_service_profile;
-
- /* ID of CoS Queue 3. FF - Invalid id */
- uint8_t queue_id3;
-
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id3_service_profile;
-
- /* ID of CoS Queue 4. FF - Invalid id */
- uint8_t queue_id4;
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id4_service_profile;
-
- /* ID of CoS Queue 5. FF - Invalid id */
- uint8_t queue_id5;
-
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id5_service_profile;
-
- /* ID of CoS Queue 6. FF - Invalid id */
- uint8_t queue_id6_service_profile;
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id6;
-
- /* ID of CoS Queue 7. FF - Invalid id */
- uint8_t queue_id7;
-
- /* This value is applicable to CoS queues only. */
- /* Lossy (best-effort) */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
- (UINT32_C(0x0) << 0)
- /* Lossless */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
- (UINT32_C(0x1) << 0)
- /*
- * Set to 0xFF... (All Fs) if there is no service profile
- * specified
- */
- #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
- (UINT32_C(0xff) << 0)
- uint8_t queue_id7_service_profile;
-
+ uint32_t stat_ctx_id;
+ /* This is the statistics context ID value. */
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
-/* hwrm_func_drv_rgtr */
-/*
- * Description: This command is used by the function driver to register its
- * information with the HWRM. A function driver shall implement this command. A
- * function driver shall use this command during the driver initialization right
- * after the HWRM version discovery and default ring resources allocation.
- */
-
-/* Input (80 bytes) */
-struct hwrm_func_drv_rgtr_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/* hwrm_stat_ctx_clr_stats */
+/* Description: This command clears statistics of a context. */
+/* Input (24 bytes) */
+struct hwrm_stat_ctx_clr_stats_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
- /*
- * When this bit is '1', the function driver is requesting all requests
- * from its children VF drivers to be forwarded to itself. This flag can
- * only be set by the PF driver. If a VF driver sets this flag, it
- * should be ignored by the HWRM.
- */
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
- /*
- * When this bit is '1', the function is requesting none of the requests
- * from its children VF drivers to be forwarded to itself. This flag can
- * only be set by the PF driver. If a VF driver sets this flag, it
- * should be ignored by the HWRM.
- */
- #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2)
- uint32_t flags;
-
- /* This bit must be '1' for the os_type field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1)
- /* This bit must be '1' for the ver field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2)
- /* This bit must be '1' for the timestamp field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4)
- /* This bit must be '1' for the vf_req_fwd field to be configured. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD UINT32_C(0x8)
/*
- * This bit must be '1' for the async_event_fwd field to be configured.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \
- UINT32_C(0x10)
- uint32_t enables;
-
- /* This value indicates the type of OS. */
- /* Unknown */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN \
- (UINT32_C(0x0) << 0)
- /* Other OS not listed below. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER \
- (UINT32_C(0x1) << 0)
- /* MSDOS OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS \
- (UINT32_C(0xe) << 0)
- /* Windows OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS \
- (UINT32_C(0x12) << 0)
- /* Solaris OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS \
- (UINT32_C(0x1d) << 0)
- /* Linux OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX \
- (UINT32_C(0x24) << 0)
- /* FreeBSD OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD \
- (UINT32_C(0x2a) << 0)
- /* VMware ESXi OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI \
- (UINT32_C(0x68) << 0)
- /* Microsoft Windows 8 64-bit OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 \
- (UINT32_C(0x73) << 0)
- /* Microsoft Windows Server 2012 R2 OS. */
- #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 \
- (UINT32_C(0x74) << 0)
- uint16_t os_type;
-
- /* This is the major version of the driver. */
- uint8_t ver_maj;
-
- /* This is the minor version of the driver. */
- uint8_t ver_min;
-
- /* This is the update version of the driver. */
- uint8_t ver_upd;
-
- uint8_t unused_0;
- uint16_t unused_1;
-
- /*
- * This is a 32-bit timestamp provided by the driver for keep alive. The
- * timestamp is in multiples of 1ms.
- */
- uint32_t timestamp;
-
- uint32_t unused_2;
-
- /*
- * This is a 256-bit bit mask provided by the PF driver for letting the
- * HWRM know what commands issued by the VF driver to the HWRM should be
- * forwarded to the PF driver. Nth bit refers to the Nth req_type.
- * Setting Nth bit to 1 indicates that requests from the VF driver with
- * req_type equal to N shall be forwarded to the parent PF driver. This
- * field is not valid for the VF driver.
- */
- uint32_t vf_req_fwd[8];
-
- /*
- * This is a 256-bit bit mask provided by the function driver (PF or VF
- * driver) to indicate the list of asynchronous event completions to be
- * forwarded. Nth bit refers to the Nth event_id. Setting Nth bit to 1
- * by the function driver shall result in the HWRM forwarding
- * asynchronous event completion with event_id equal to N. If all bits
- * are set to 0 (value of 0), then the HWRM shall not forward any
- * asynchronous event completion to this function driver.
- */
- uint32_t async_event_fwd[8];
+ uint32_t stat_ctx_id;
+ /* ID of the statistics context that is being queried. */
+ uint32_t unused_0;
} __attribute__((packed));
/* Output (16 bytes) */
-
-struct hwrm_func_drv_rgtr_output {
+struct hwrm_stat_ctx_clr_stats_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
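As an aside for readers of this header: a minimal sketch (not part of the patch) of how a driver might fill this request before posting it through its HWRM channel. The helper name is hypothetical, the HWRM_STAT_CTX_CLR_STATS request-type constant is assumed to be defined earlier in this header, and HWRM fields are little-endian, hence the rte_cpu_to_le_*() conversions from <rte_byteorder.h>.

/* Hypothetical sketch: compose a stat_ctx_clr_stats request. */
#include <string.h>
#include <rte_byteorder.h>

static void
example_build_clr_stats(struct hwrm_stat_ctx_clr_stats_input *req,
			uint32_t stat_ctx_id, uint64_t resp_dma_addr)
{
	memset(req, 0, sizeof(*req));
	req->req_type = rte_cpu_to_le_16(HWRM_STAT_CTX_CLR_STATS);
	req->cmpl_ring = rte_cpu_to_le_16(0xffff);	/* -1: no completion ring */
	req->target_id = rte_cpu_to_le_16(0xffff);	/* addressed to the HWRM */
	req->resp_addr = rte_cpu_to_le_64(resp_dma_addr);
	req->stat_ctx_id = rte_cpu_to_le_32(stat_ctx_id);
}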
-/* hwrm_func_drv_unrgtr */
+/* hwrm_exec_fwd_resp */
/*
- * Description: This command is used by the function driver to un register with
- * the HWRM. A function driver shall implement this command. A function driver
- * shall use this command during the driver unloading.
+ * Description: This command is used to send an encapsulated request to the
+ * HWRM. This command instructs the HWRM to execute the request and forward the
+ * response of the encapsulated request to the location specified in the
+ * original request that is encapsulated. The target id of this command shall be
+ * set to 0xFFFF (HWRM). The response location in this command shall be used to
+ * acknowledge the receipt of the encapsulated request and forwarding of the
+ * response.
*/
-/* Input (24 bytes) */
-
-struct hwrm_func_drv_unrgtr_input {
- /*
- * This value indicates what type of request this is. The format for the
- * rest of the command is determined by this field.
- */
+/* Input (128 bytes) */
+struct hwrm_exec_fwd_resp_input {
uint16_t req_type;
-
/*
- * This value indicates the what completion ring the request will be
- * optionally completed on. If the value is -1, then no CR completion
- * will be generated. Any other value must be a valid CR ring_id value
- * for this function.
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
*/
uint16_t cmpl_ring;
-
- /* This value indicates the command sequence number. */
- uint16_t seq_id;
-
/*
- * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
- * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+	 * This value indicates what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
*/
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
uint16_t target_id;
-
/*
- * This is the host address where the response will be written when the
- * request is complete. This area must be 16B aligned and must be
- * cleared to zero before the request is made.
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
*/
uint64_t resp_addr;
-
/*
- * When this bit is '1', the function driver is notifying the HWRM to
- * prepare for the shutdown.
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
*/
- #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \
- UINT32_C(0x1)
- uint32_t flags;
-
- uint32_t unused_0;
+ uint32_t encap_request[26];
+ /*
+ * This is an encapsulated request. This request should be
+ * executed by the HWRM and the response should be provided in
+ * the response buffer inside the encapsulated request.
+ */
+ uint16_t encap_resp_target_id;
+ /*
+ * This value indicates the target id of the response to the
+ * encapsulated request. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF -
+ * HWRM
+ */
+ uint16_t unused_0[3];
} __attribute__((packed));
/* Output (16 bytes) */
-struct hwrm_func_drv_unrgtr_output {
+struct hwrm_exec_fwd_resp_output {
+ uint16_t error_code;
/*
- * Pass/Fail or error type Note: receiver to verify the in parameters,
- * and fail the call with an error when appropriate
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
*/
- uint16_t error_code;
-
- /* This field returns the type of original request. */
uint16_t req_type;
-
- /* This field provides original sequence number of the command. */
+ /* This field returns the type of original request. */
uint16_t seq_id;
-
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
/*
- * This field is the length of the response in bytes. The last byte of
- * the response is a valid flag that will read as '1' when the command
- * has been completely written to memory.
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
*/
- uint16_t resp_len;
-
uint32_t unused_0;
uint8_t unused_1;
uint8_t unused_2;
uint8_t unused_3;
-
+ uint8_t valid;
/*
- * This field is used in Output records to indicate that the output is
- * completely written to RAM. This field should be read as '1' to
- * indicate that the output has been completely written. When writing a
- * command completion or response to an internal processor, the order of
- * writes has to be such that this field is written last.
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
*/
- uint8_t valid;
} __attribute__((packed));
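Again purely as illustration (not part of the patch): a PF driver forwarding a VF request could wrap it in this message roughly as below. The helper is hypothetical, HWRM_EXEC_FWD_RESP is assumed to be defined earlier in this header, and the encapsulated request is truncated to fit the 26-word encap_request area.

/* Hypothetical sketch: encapsulate a VF request for the HWRM to execute. */
#include <string.h>
#include <rte_byteorder.h>

static void
example_build_exec_fwd_resp(struct hwrm_exec_fwd_resp_input *req,
			    const void *vf_req, size_t vf_req_len,
			    uint16_t vf_fid)
{
	memset(req, 0, sizeof(*req));
	req->req_type = rte_cpu_to_le_16(HWRM_EXEC_FWD_RESP);
	req->target_id = rte_cpu_to_le_16(0xffff);	/* must target the HWRM */
	if (vf_req_len > sizeof(req->encap_request))
		vf_req_len = sizeof(req->encap_request);
	memcpy(req->encap_request, vf_req, vf_req_len);
	/* the response of the encapsulated request goes back to the VF */
	req->encap_resp_target_id = rte_cpu_to_le_16(vf_fid);
}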
#endif
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 3c169730..2a3893a1 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -166,6 +166,7 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
struct bond_dev_private *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
+ uint32_t vlan_filter_bmp_size;
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
@@ -189,7 +190,7 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
}
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
goto err;
@@ -260,6 +261,27 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
goto err;
}
+ vlan_filter_bmp_size =
+ rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+ internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
+ RTE_CACHE_LINE_SIZE);
+ if (internals->vlan_filter_bmpmem == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate vlan bitmap for bonded device %u\n",
+ eth_dev->data->port_id);
+ goto err;
+ }
+
+ internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+ internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
+ if (internals->vlan_filter_bmp == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to init vlan bitmap for bonded device %u\n",
+ eth_dev->data->port_id);
+ rte_free(internals->vlan_filter_bmpmem);
+ goto err;
+ }
+
return eth_dev->data->port_id;
err:
@@ -299,6 +321,9 @@ rte_eth_bond_free(const char *name)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
+ internals = eth_dev->data->dev_private;
+ rte_bitmap_free(internals->vlan_filter_bmp);
+ rte_free(internals->vlan_filter_bmpmem);
rte_free(eth_dev->data->dev_private);
rte_free(eth_dev->data->mac_addrs);
@@ -308,6 +333,46 @@ rte_eth_bond_free(const char *name)
}
static int
+slave_vlan_filter_set(uint8_t bonded_port_id, uint8_t slave_port_id)
+{
+ struct rte_eth_dev *bonded_eth_dev;
+ struct bond_dev_private *internals;
+ int found;
+ int res = 0;
+ uint64_t slab = 0;
+ uint32_t pos = 0;
+ uint16_t first;
+
+ bonded_eth_dev = &rte_eth_devices[bonded_port_id];
+ if (bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter == 0)
+ return 0;
+
+ internals = bonded_eth_dev->data->dev_private;
+ found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab);
+ first = pos;
+
+ if (!found)
+ return 0;
+
+ do {
+ uint32_t i;
+ uint64_t mask;
+
+ for (i = 0, mask = 1;
+ i < RTE_BITMAP_SLAB_BIT_SIZE;
+ i ++, mask <<= 1) {
+ if (unlikely(slab & mask))
+ res = rte_eth_dev_vlan_filter(slave_port_id,
+ (uint16_t)pos, 1);
+ }
+ found = rte_bitmap_scan(internals->vlan_filter_bmp,
+ &pos, &slab);
+ } while (found && first != pos && res == 0);
+
+ return res;
+}
+
+static int
__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
@@ -427,6 +492,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
activate_slave(bonded_eth_dev, slave_port_id);
}
}
+
+ slave_vlan_filter_set(bonded_port_id, slave_port_id);
+
return 0;
}
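The bitmap plumbing added above leans on the rte_bitmap API; a self-contained sketch of the same calls (footprint query, init over caller-provided memory, then marking a VLAN id) follows. The function and allocation names are hypothetical and only illustrate the API this patch uses.

/* Hypothetical sketch: build a per-device VLAN-id bitmap with rte_bitmap. */
#include <rte_bitmap.h>
#include <rte_malloc.h>
#include <rte_ether.h>

static struct rte_bitmap *
example_vlan_bitmap_create(void)
{
	uint32_t size = rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
	void *mem = rte_zmalloc("example_vlan_bmp", size, RTE_CACHE_LINE_SIZE);
	struct rte_bitmap *bmp;

	if (mem == NULL)
		return NULL;

	bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1, mem, size);
	if (bmp == NULL) {
		rte_free(mem);
		return NULL;
	}

	rte_bitmap_set(bmp, 100);	/* remember that VLAN 100 is enabled */
	return bmp;
}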
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 25fe00a6..a80b6fa9 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -42,7 +42,7 @@
#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_alarm.h>
#include <rte_cycles.h>
@@ -122,6 +122,15 @@ bond_ethdev_rx_burst_active_backup(void *queue, struct rte_mbuf **bufs,
bd_rx_q->queue_id, bufs, nb_pkts);
}
+static inline uint8_t
+is_lacp_packets(uint16_t ethertype, uint8_t subtype, uint16_t vlan_tci)
+{
+ const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
+
+ return !vlan_tci && (ethertype == ether_type_slow_be &&
+ (subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
+}
+
static uint16_t
bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts)
@@ -141,6 +150,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
uint8_t i, j, k;
+ uint8_t subtype;
rte_eth_macaddr_get(internals->port_id, &bond_mac);
/* Copy slave list to protect against slave up/down changes during tx
@@ -166,10 +176,12 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
rte_prefetch0(rte_pktmbuf_mtod(bufs[j + 3], void *));
hdr = rte_pktmbuf_mtod(bufs[j], struct ether_hdr *);
+ subtype = ((struct slow_protocol_frame *)hdr)->slow_protocol.subtype;
+
/* Remove packet from array if it is slow packet or slave is not
	 * in collecting state or bonding interface is not in promiscuous
* mode and packet address does not match. */
- if (unlikely(hdr->ether_type == ether_type_slow_be ||
+ if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]->vlan_tci) ||
!collecting || (!promisc &&
!is_multicast_ether_addr(&hdr->d_addr) &&
!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
@@ -1335,6 +1347,9 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
}
+ slave_eth_dev->data->dev_conf.rxmode.hw_vlan_filter =
+ bonded_eth_dev->data->dev_conf.rxmode.hw_vlan_filter;
+
/* Configure device */
errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
bonded_eth_dev->data->nb_rx_queues,
@@ -1637,7 +1652,10 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
void
bond_ethdev_close(struct rte_eth_dev *dev)
{
+ struct bond_dev_private *internals = dev->data->dev_private;
+
bond_ethdev_free_queues(dev);
+ rte_bitmap_reset(internals->vlan_filter_bmp);
}
/* forward declaration */
@@ -1667,6 +1685,35 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static int
+bond_ethdev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ int res;
+ uint8_t i;
+ struct bond_dev_private *internals = dev->data->dev_private;
+
+ /* don't do this while a slave is being added */
+ rte_spinlock_lock(&internals->lock);
+
+ if (on)
+ rte_bitmap_set(internals->vlan_filter_bmp, vlan_id);
+ else
+ rte_bitmap_clear(internals->vlan_filter_bmp, vlan_id);
+
+ for (i = 0; i < internals->slave_count; i++) {
+ uint8_t port_id = internals->slaves[i].port_id;
+
+ res = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
+ if (res == ENOTSUP)
+ RTE_LOG(WARNING, PMD,
+ "Setting VLAN filter on slave port %u not supported.\n",
+ port_id);
+ }
+
+ rte_spinlock_unlock(&internals->lock);
+ return 0;
+}
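From an application's point of view the new callback is reached through the normal ethdev call; a minimal usage sketch is below (the port id and VLAN value are arbitrary, and the wrapper is hypothetical).

/* Hypothetical usage sketch: enabling a VLAN id on the bonded port records
 * it in the bitmap above and programs every currently attached slave. */
#include <rte_ethdev.h>

static int
example_enable_vlan_on_bond(uint8_t bonded_port_id)
{
	return rte_eth_dev_vlan_filter(bonded_port_id, 100, 1 /* on */);
}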
+
+static int
bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id __rte_unused,
const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool)
@@ -1923,7 +1970,7 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
return;
_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
- RTE_ETH_EVENT_INTR_LSC);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
void
@@ -2044,7 +2091,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
(void *)bonded_eth_dev);
else
_rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
} else {
if (internals->link_down_delay_ms > 0)
@@ -2053,7 +2100,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
(void *)bonded_eth_dev);
else
_rte_eth_dev_callback_process(bonded_eth_dev,
- RTE_ETH_EVENT_INTR_LSC);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
}
}
@@ -2171,6 +2218,7 @@ const struct eth_dev_ops default_dev_ops = {
.dev_close = bond_ethdev_close,
.dev_configure = bond_ethdev_configure,
.dev_infos_get = bond_ethdev_info,
+ .vlan_filter_set = bond_ethdev_vlan_filter_set,
.rx_queue_setup = bond_ethdev_rx_queue_setup,
.tx_queue_setup = bond_ethdev_tx_queue_setup,
.rx_queue_release = bond_ethdev_rx_queue_release,
@@ -2187,7 +2235,7 @@ const struct eth_dev_ops default_dev_ops = {
};
static int
-bond_init(const char *name, const char *params)
+bond_probe(const char *name, const char *params)
{
struct bond_dev_private *internals;
struct rte_kvargs *kvlist;
@@ -2254,7 +2302,7 @@ parse_error:
}
static int
-bond_uninit(const char *name)
+bond_remove(const char *name)
{
int ret;
@@ -2518,15 +2566,15 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
return 0;
}
-static struct rte_driver bond_drv = {
- .type = PMD_VDEV,
- .init = bond_init,
- .uninit = bond_uninit,
+static struct rte_vdev_driver bond_drv = {
+ .probe = bond_probe,
+ .remove = bond_remove,
};
-PMD_REGISTER_DRIVER(bond_drv, eth_bond);
+RTE_PMD_REGISTER_VDEV(net_bonding, bond_drv);
+RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
-DRIVER_REGISTER_PARAM_STRING(eth_bond,
+RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
"slave=<ifc> "
"primary=<ifc> "
"mode=[0-6] "
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index 2bdc9efa..d95d440b 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -36,6 +36,7 @@
#include <rte_ethdev.h>
#include <rte_spinlock.h>
+#include <rte_bitmap.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_8023ad_private.h"
@@ -172,6 +173,9 @@ struct bond_dev_private {
uint32_t candidate_max_rx_pktlen;
uint32_t max_rx_pktlen;
+
+ void *vlan_filter_bmpmem; /* enabled vlan filter bitmap */
+ struct rte_bitmap *vlan_filter_bmp;
};
extern const struct eth_dev_ops default_dev_ops;
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 9208a615..b7f28ebb 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1039,33 +1039,14 @@ out_free_adapter:
static struct eth_driver rte_cxgbe_pmd = {
.pci_drv = {
- .name = "rte_cxgbe_pmd",
.id_table = cxgb4_pci_tbl,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_cxgbe_dev_init,
.dev_private_size = sizeof(struct port_info),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI CXGBE devices.
- */
-static int rte_cxgbe_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- CXGBE_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_cxgbe_pmd);
- return 0;
-}
-
-static struct rte_driver rte_cxgbe_driver = {
- .type = PMD_PDEV,
- .init = rte_cxgbe_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_cxgbe_driver, cxgb4);
-DRIVER_REGISTER_PCI_TABLE(cxgb4, cxgb4_pci_tbl);
-
+RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index ceaf5ab2..922155b4 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -1150,7 +1150,7 @@ int cxgbe_probe(struct adapter *adapter)
*/
/* reserve an ethdev entry */
- pi->eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+ pi->eth_dev = rte_eth_dev_allocate(name);
if (!pi->eth_dev)
goto out_free;
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index ab5a842a..736f08ce 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -1645,7 +1645,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = cxgbe_roundup(iq->size, 16);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring",
+ eth_dev->driver->pci_drv.driver.name,
+ fwevtq ? "fwq_ring" : "rx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1697,7 +1698,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = cxgbe_roundup(fl->size, 8);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name,
+ eth_dev->driver->pci_drv.driver.name,
fwevtq ? "fwq_ring" : "fl_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1893,7 +1894,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name, "tx_ring",
+ eth_dev->driver->pci_drv.driver.name, "tx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index ad104ed7..aee3d340 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -391,10 +391,11 @@ eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_em_pmd = {
.pci_drv = {
- .name = "rte_em_pmd",
.id_table = pci_id_em_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_em_dev_init,
.eth_dev_uninit = eth_em_dev_uninit,
@@ -402,13 +403,6 @@ static struct eth_driver rte_em_pmd = {
};
static int
-rte_em_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- rte_eth_driver_register(&rte_em_pmd);
- return 0;
-}
-
-static int
em_hw_init(struct e1000_hw *hw)
{
int diag;
@@ -645,6 +639,7 @@ eth_em_start(struct rte_eth_dev *dev)
speeds = &dev->data->dev_conf.link_speeds;
if (*speeds == ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ hw->mac.autoneg = 1;
} else {
num_speeds = 0;
autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
@@ -680,6 +675,17 @@ eth_em_start(struct rte_eth_dev *dev)
}
if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
goto error_invalid_config;
+
+ /* Set/reset the mac.autoneg based on the link speed,
+ * fixed or not
+ */
+ if (!autoneg) {
+ hw->mac.autoneg = 0;
+ hw->mac.forced_speed_duplex =
+ hw->phy.autoneg_advertised;
+ } else {
+ hw->mac.autoneg = 1;
+ }
}
e1000_setup_link(hw);
@@ -1605,7 +1611,7 @@ eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
eth_em_interrupt_get_status(dev);
eth_em_interrupt_action(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
@@ -1799,10 +1805,5 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
return 0;
}
-struct rte_driver em_pmd_drv = {
- .type = PMD_PDEV,
- .init = rte_em_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(em_pmd_drv, em);
-DRIVER_REGISTER_PCI_TABLE(em, pci_id_em_map);
+RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 6d8750a8..41f51c0f 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index fbf4d090..2fddf0cb 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -306,22 +306,57 @@ static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
* The set of PCI devices this driver supports
*/
static const struct rte_pci_id pci_id_igb_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
+
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
+
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
+ { .vendor_id = 0, /* sentinel */ },
};
/*
* The set of PCI devices this driver supports (for 82576&I350 VF)
*/
static const struct rte_pci_id pci_id_igbvf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -1043,10 +1078,11 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_igb_pmd = {
.pci_drv = {
- .name = "rte_igb_pmd",
.id_table = pci_id_igb_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_igb_dev_init,
.eth_dev_uninit = eth_igb_dev_uninit,
@@ -1058,22 +1094,16 @@ static struct eth_driver rte_igb_pmd = {
*/
static struct eth_driver rte_igbvf_pmd = {
.pci_drv = {
- .name = "rte_igbvf_pmd",
.id_table = pci_id_igbvf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_igbvf_dev_init,
.eth_dev_uninit = eth_igbvf_dev_uninit,
.dev_private_size = sizeof(struct e1000_adapter),
};
-static int
-rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- rte_eth_driver_register(&rte_igb_pmd);
- return 0;
-}
-
static void
igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
@@ -1085,20 +1115,6 @@ igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
}
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
- */
-static int
-rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_igbvf_pmd);
- return 0;
-}
-
static int
igb_check_mq_mode(struct rte_eth_dev *dev)
{
@@ -1311,6 +1327,7 @@ eth_igb_start(struct rte_eth_dev *dev)
speeds = &dev->data->dev_conf.link_speeds;
if (*speeds == ETH_LINK_SPEED_AUTONEG) {
hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+ hw->mac.autoneg = 1;
} else {
num_speeds = 0;
autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
@@ -1346,6 +1363,17 @@ eth_igb_start(struct rte_eth_dev *dev)
}
if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
goto error_invalid_config;
+
+ /* Set/reset the mac.autoneg based on the link speed,
+ * fixed or not
+ */
+ if (!autoneg) {
+ hw->mac.autoneg = 0;
+ hw->mac.forced_speed_duplex =
+ hw->phy.autoneg_advertised;
+ } else {
+ hw->mac.autoneg = 1;
+ }
}
e1000_setup_link(hw);
@@ -2667,7 +2695,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
E1000_WRITE_REG(hw, E1000_TCTL, tctl);
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
E1000_WRITE_FLUSH(hw);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
return 0;
@@ -2727,7 +2755,7 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == E1000_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
}
static int
@@ -5049,16 +5077,6 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
return nvm->ops.write(hw, first, length, data);
}
-static struct rte_driver pmd_igb_drv = {
- .type = PMD_PDEV,
- .init = rte_igb_pmd_init,
-};
-
-static struct rte_driver pmd_igbvf_drv = {
- .type = PMD_PDEV,
- .init = rte_igbvf_pmd_init,
-};
-
static int
eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
@@ -5220,7 +5238,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
E1000_WRITE_FLUSH(hw);
}
-PMD_REGISTER_DRIVER(pmd_igb_drv, igb);
-DRIVER_REGISTER_PCI_TABLE(igb, pci_id_igb_map);
-PMD_REGISTER_DRIVER(pmd_igbvf_drv, igbvf);
-DRIVER_REGISTER_PCI_TABLE(igbvf, pci_id_igbvf_map);
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index c5db727d..dbd37acc 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -56,7 +56,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -748,7 +747,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
*/
static uint64_t error_to_pkt_flags_map[4] = {
- 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
};
return error_to_pkt_flags_map[(rx_status >>
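The rework above replaces the old three-entry table (whose index 0 implicitly reported no flags at all) with a full four-entry map keyed by the two checksum error bits. A hedged sketch of the same mapping, with the hardware bit extraction elided because it is not shown in this hunk:

/* Hypothetical sketch: bit 0 = L4 checksum error, bit 1 = IP checksum error,
 * mirroring the table in the hunk above. */
#include <rte_mbuf.h>

static inline uint64_t
example_cksum_flags(int ip_err, int l4_err)
{
	static const uint64_t map[4] = {
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
		PKT_RX_IP_CKSUM_BAD  | PKT_RX_L4_CKSUM_GOOD,
		PKT_RX_IP_CKSUM_BAD  | PKT_RX_L4_CKSUM_BAD,
	};

	return map[((ip_err != 0) << 1) | (l4_err != 0)];
}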
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 30581610..ab9a178f 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -676,8 +676,7 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
if (m)
__rte_mbuf_raw_free(m);
- ring->next_to_clean =
- ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+ ring->next_to_clean++;
}
}
@@ -692,8 +691,7 @@ static void ena_tx_queue_release_bufs(struct ena_ring *ring)
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
- ring->next_to_clean =
- ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+ ring->next_to_clean++;
}
}
@@ -926,8 +924,8 @@ static int ena_queue_restart(struct ena_ring *ring)
if (ring->type == ENA_RING_TYPE_TX)
return 0;
- rc = ena_populate_rx_queue(ring, ring->ring_size - 1);
- if ((unsigned int)rc != ring->ring_size - 1) {
+ rc = ena_populate_rx_queue(ring, ring->ring_size);
+ if ((unsigned int)rc != ring->ring_size) {
PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
return (-1);
}
@@ -962,6 +960,13 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
+ if (!rte_is_power_of_2(nb_desc)) {
+ RTE_LOG(ERR, PMD,
+			"Unsupported size of TX queue: %d is not a power of 2.",
+ nb_desc);
+ return -EINVAL;
+ }
+
if (nb_desc > adapter->tx_ring_size) {
RTE_LOG(ERR, PMD,
"Unsupported size of TX queue (max size: %d)\n",
@@ -1056,6 +1061,13 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
return -1;
}
+ if (!rte_is_power_of_2(nb_desc)) {
+ RTE_LOG(ERR, PMD,
+			"Unsupported size of RX queue: %d is not a power of 2.",
+ nb_desc);
+ return -EINVAL;
+ }
+
if (nb_desc > adapter->rx_ring_size) {
RTE_LOG(ERR, PMD,
"Unsupported size of RX queue (max size: %d)\n",
@@ -1115,23 +1127,25 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
{
unsigned int i;
int rc;
- unsigned int ring_size = rxq->ring_size;
- unsigned int ring_mask = ring_size - 1;
- int next_to_use = rxq->next_to_use & ring_mask;
+ uint16_t ring_size = rxq->ring_size;
+ uint16_t ring_mask = ring_size - 1;
+ uint16_t next_to_use = rxq->next_to_use;
+ uint16_t in_use;
struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
if (unlikely(!count))
return 0;
- ena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean,
- rxq->ring_size)) +
- count) < rxq->ring_size), "bad ring state");
+ in_use = rxq->next_to_use - rxq->next_to_clean;
+ ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");
- count = RTE_MIN(count, ring_size - next_to_use);
+ count = RTE_MIN(count,
+ (uint16_t)(ring_size - (next_to_use & ring_mask)));
/* get resources for incoming packets */
rc = rte_mempool_get_bulk(rxq->mb_pool,
- (void **)(&mbufs[next_to_use]), count);
+ (void **)(&mbufs[next_to_use & ring_mask]),
+ count);
if (unlikely(rc < 0)) {
rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -1139,7 +1153,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
}
for (i = 0; i < count; i++) {
- struct rte_mbuf *mbuf = mbufs[next_to_use];
+ uint16_t next_to_use_masked = next_to_use & ring_mask;
+ struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
struct ena_com_buf ebuf;
rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
@@ -1148,12 +1163,12 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* pass resource to device */
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
- &ebuf, next_to_use);
+ &ebuf, next_to_use_masked);
if (unlikely(rc)) {
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
break;
}
- next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size);
+ next_to_use++;
}
	/* When we submitted free resources to device... */
@@ -1475,7 +1490,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
unsigned int ring_size = rx_ring->ring_size;
unsigned int ring_mask = ring_size - 1;
uint16_t next_to_clean = rx_ring->next_to_clean;
- int desc_in_use = 0;
+ uint16_t desc_in_use = 0;
unsigned int recv_idx = 0;
struct rte_mbuf *mbuf = NULL;
struct rte_mbuf *mbuf_head = NULL;
@@ -1493,8 +1508,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return 0;
}
- desc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use,
- next_to_clean, ring_size);
+ desc_in_use = rx_ring->next_to_use - next_to_clean;
if (unlikely(nb_pkts > desc_in_use))
nb_pkts = desc_in_use;
@@ -1535,8 +1549,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
mbuf_prev = mbuf;
segments++;
- next_to_clean =
- ENA_RX_RING_IDX_NEXT(next_to_clean, ring_size);
+ next_to_clean++;
}
/* fill mbuf attributes if any */
@@ -1549,10 +1562,10 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
}
/* Burst refill to save doorbells, memory barriers, const interval */
- if (ring_size - desc_in_use - 1 > ENA_RING_DESCS_RATIO(ring_size))
- ena_populate_rx_queue(rx_ring, ring_size - desc_in_use - 1);
+ if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
+ ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
- rx_ring->next_to_clean = next_to_clean & ring_mask;
+ rx_ring->next_to_clean = next_to_clean;
return recv_idx;
}
@@ -1561,7 +1574,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
- unsigned int next_to_use = tx_ring->next_to_use;
+ uint16_t next_to_use = tx_ring->next_to_use;
+ uint16_t next_to_clean = tx_ring->next_to_clean;
struct rte_mbuf *mbuf;
unsigned int ring_size = tx_ring->ring_size;
unsigned int ring_mask = ring_size - 1;
@@ -1569,7 +1583,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
struct ena_tx_buffer *tx_info;
struct ena_com_buf *ebuf;
uint16_t rc, req_id, total_tx_descs = 0;
- uint16_t sent_idx = 0;
+ uint16_t sent_idx = 0, empty_tx_reqs;
int nb_hw_desc;
/* Check adapter state */
@@ -1579,10 +1593,14 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return 0;
}
+ empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
+ if (nb_pkts > empty_tx_reqs)
+ nb_pkts = empty_tx_reqs;
+
for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
mbuf = tx_pkts[sent_idx];
- req_id = tx_ring->empty_tx_reqs[next_to_use];
+ req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
tx_info = &tx_ring->tx_buffer_info[req_id];
tx_info->mbuf = mbuf;
tx_info->num_of_bufs = 0;
@@ -1645,7 +1663,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
tx_info->tx_descs = nb_hw_desc;
- next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size);
+ next_to_use++;
}
/* If there are ready packets to be xmitted... */
@@ -1668,10 +1686,8 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
rte_pktmbuf_free(mbuf);
/* Put back descriptor to the ring for reuse */
- tx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id;
- tx_ring->next_to_clean =
- ENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean,
- tx_ring->ring_size);
+ tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
+ next_to_clean++;
/* If too many descs to clean, leave it for another run */
if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
@@ -1681,33 +1697,22 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
if (total_tx_descs > 0) {
/* acknowledge completion of sent packets */
ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+ tx_ring->next_to_clean = next_to_clean;
}
return sent_idx;
}
static struct eth_driver rte_ena_pmd = {
- {
- .name = "rte_ena_pmd",
+ .pci_drv = {
.id_table = pci_id_ena_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_ena_dev_init,
.dev_private_size = sizeof(struct ena_adapter),
};
-static int
-rte_ena_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- rte_eth_driver_register(&rte_ena_pmd);
- return 0;
-};
-
-struct rte_driver ena_pmd_drv = {
- .type = PMD_PDEV,
- .init = rte_ena_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(ena_pmd_drv, ena);
-DRIVER_REGISTER_PCI_TABLE(ena, pci_id_ena_map);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
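The ena changes above drop the ENA_CIRC_* masking macros in favour of free-running 16-bit producer/consumer counters: occupancy becomes a plain unsigned subtraction, and a slot index is obtained by masking only at the point of access, which is why both queue setup paths now insist on power-of-two ring sizes. A self-contained sketch of that scheme (names hypothetical):

/* Hypothetical sketch of the free-running index scheme adopted here. */
#include <stdint.h>

struct example_ring {
	uint16_t next_to_use;	/* producer counter, never masked */
	uint16_t next_to_clean;	/* consumer counter, never masked */
	uint16_t ring_size;	/* must be a power of two */
	void *slots[1024];	/* 1024 matches ENA_DEFAULT_RING_SIZE */
};

static inline uint16_t
example_ring_in_use(const struct example_ring *r)
{
	/* 16-bit unsigned subtraction handles counter wrap-around. */
	return r->next_to_use - r->next_to_clean;
}

static inline void *
example_ring_slot(const struct example_ring *r, uint16_t counter)
{
	return r->slots[counter & (r->ring_size - 1)];
}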
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 61390a93..4c7edbb9 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -42,33 +42,13 @@
#define ENA_MEM_BAR 2
#define ENA_MAX_NUM_QUEUES 128
-
-#define ENA_DEFAULT_TX_SW_DESCS (1024)
-#define ENA_DEFAULT_TX_HW_DESCS (1024)
#define ENA_DEFAULT_RING_SIZE (1024)
-
#define ENA_MIN_FRAME_LEN 64
-
-#define ENA_NAME_MAX_LEN 20
-#define ENA_IRQNAME_SIZE 40
-
-#define ENA_PKT_MAX_BUFS 17
+#define ENA_NAME_MAX_LEN 20
+#define ENA_PKT_MAX_BUFS 17
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
-#define ENA_CIRC_COUNT(head, tail, size) \
- (((uint16_t)((uint16_t)(head) - (uint16_t)(tail))) & ((size) - 1))
-
-#define ENA_CIRC_INC(index, step, size) \
- ((uint16_t)(index) + (uint16_t)(step))
-#define ENA_CIRC_INC_WRAP(index, step, size) \
- (((uint16_t)(index) + (uint16_t)(step)) & ((size) - 1))
-
-#define ENA_TX_RING_IDX_NEXT(idx, ring_size) \
- ENA_CIRC_INC_WRAP(idx, 1, ring_size)
-#define ENA_RX_RING_IDX_NEXT(idx, ring_size) \
- ENA_CIRC_INC_WRAP(idx, 1, ring_size)
-
struct ena_adapter;
enum ena_ring_type {
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 4db21a4d..84e4840a 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -470,6 +470,18 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+ u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err)
+ return 0;
+ return (a1 >= (u32)FILTER_DPDK_1);
+}
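A hedged usage sketch of the capability probe above; the chooser function is hypothetical and only shows how a caller might pick between the classic and the new DPDK filter type.

/* Hypothetical usage sketch: pick a filter type at probe time. */
static int
example_select_filter_type(struct vnic_dev *vdev)
{
	if (vnic_dev_capable_adv_filters(vdev))
		return FILTER_DPDK_1;		/* firmware accepts CMD_ADD_ADV_FILTER */
	return FILTER_IPV4_5TUPLE;		/* fall back to classic filters */
}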
+
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -1007,7 +1019,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
* @data: filter data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data)
+ struct filter_v2 *data)
{
u64 a0, a1;
int wait = 1000;
@@ -1016,11 +1028,20 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_tlv *tlv, *tlv_va;
struct filter_action *action;
u64 tlv_size;
+ u32 filter_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
+ enum vnic_devcmd_cmd dev_cmd;
+
if (cmd == CLSF_ADD) {
- tlv_size = sizeof(struct filter) +
+ if (data->type == FILTER_DPDK_1)
+ dev_cmd = CMD_ADD_ADV_FILTER;
+ else
+ dev_cmd = CMD_ADD_FILTER;
+
+ filter_size = vnic_filter_size(data);
+ tlv_size = filter_size +
sizeof(struct filter_action) +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
@@ -1034,12 +1055,12 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
a1 = tlv_size;
memset(tlv, 0, tlv_size);
tlv->type = CLSF_TLV_FILTER;
- tlv->length = sizeof(struct filter);
- *(struct filter *)&tlv->val = *data;
+ tlv->length = filter_size;
+ memcpy(&tlv->val, (void *)data, filter_size);
tlv = (struct filter_tlv *)((char *)tlv +
sizeof(struct filter_tlv) +
- sizeof(struct filter));
+ filter_size);
tlv->type = CLSF_TLV_ACTION;
tlv->length = sizeof(struct filter_action);
@@ -1047,7 +1068,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
action->type = FILTER_ACTION_RQ_STEERING;
action->u.rq_idx = *entry;
- ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+ ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
*entry = (u16)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h
index 689442f3..06ebd4ce 100644
--- a/drivers/net/enic/base/vnic_dev.h
+++ b/drivers/net/enic/base/vnic_dev.h
@@ -134,6 +134,7 @@ void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -201,7 +202,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data);
+ struct filter_v2 *data);
#ifdef ENIC_VXLAN
int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
u8 overlay, u8 config);
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index b3d5a6cc..785fd6fd 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* Copyright (c) 2014, Cisco Systems, Inc.
@@ -126,7 +126,8 @@ enum vnic_devcmd_cmd {
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
- * out: a0=value */
+ * out: a0=value
+ */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
@@ -146,8 +147,9 @@ enum vnic_devcmd_cmd {
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
- CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
@@ -387,9 +389,8 @@ enum vnic_devcmd_cmd {
* Subvnic migration from MQ <--> VF.
* Enable the LIF migration from MQ to VF and vice versa. MQ and VF
* indexes are statically bound at the time of initialization.
- * Based on the
- * direction of migration, the resources of either MQ or the VF shall
- * be attached to the LIF.
+ * Based on the direction of migration, the resources of either MQ or
+ * the VF shall be attached to the LIF.
* in: (u32)a0=Direction of Migration
* 0=> Migrate to VF
* 1=> Migrate to MQ
@@ -397,7 +398,6 @@ enum vnic_devcmd_cmd {
*/
CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
-
/*
* Register / Deregister the notification block for MQ subvnics
* in:
@@ -433,6 +433,10 @@ enum vnic_devcmd_cmd {
* in: (u64) a0= filter address
* (u32) a1= size of filter
* out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
*/
CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
@@ -471,23 +475,133 @@ enum vnic_devcmd_cmd {
CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
/*
- * Enable/Disable overlay offloads on the given vnic
+ * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * (ui64)a1= paddr for the info buffer
+ */
+ CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
+
+ /*
+ * Return the iSCSI config details required by the EFI Option ROM
+ * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * a0=1 Get Boot info for iSCSI enic as per
+ * iscsi_boot_efi_cfg_t
+ * in: (u64) a1=Host address where iSCSI config info is returned
+ */
+ CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
+
+ /*
+ * Create a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u32) a1 = Remote QP
+ * (u32) a2 = RDMA-RQ
+ * (u16) a3 = RQ Res Group
+ * (u16) a4 = SQ Res Group
+ * (u32) a5 = Protection Domain
+ * (u64) a6 = Remote MAC
+ * (u32) a7 = start PSN
+ * (u16) a8 = MSS
+ * (u32) a9 = protocol version
+ */
+ CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
+
+ /*
+ * Delete a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ */
+ CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
+
+ /*
+ * Retrieve a Queue Pair's status information (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u64) a1 = host buffer addr for QP status struct
+ * (u32) a2 = length of the buffer
+ */
+ CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
+
+ /*
+ * Use this devcmd for agreeing on the highest common version supported
+ * by both driver and fw for by features who need such a facility.
+ * in: (u64) a0 = feature (driver requests for the supported versions
+ * on this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /*
+ * Initialize the RDMA notification work queue
+ * in: (u64) a0 = host buffer address
+ * in: (u16) a1 = number of entries in buffer
+ * in: (u16) a2 = resource group number
+ * in: (u16) a3 = CQ number to post completion
+ */
+ CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
+
+ /*
+ * De-init the RDMA notification work queue
+ * in: (u64) a0=resource group number
+ */
+ CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
+
+ /*
+ * Control (Enable/Disable) overlay offloads on the given vnic
* in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
* a0 = OVERLAY_FEATURE_VXLAN : VxLAN
- * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable
- * a1 = OVERLAY_OFFLOAD_DISABLE : Disable
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
*/
- CMD_OVERLAY_OFFLOAD_ENABLE_DISABLE =
- _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+ CMD_OVERLAY_OFFLOAD_CTRL =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
/*
* Configuration of overlay offloads feature on a given vNIC
- * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE
- * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN
- * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN
- * in: (u16) a2 = unsigned short int port information
+ * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a1 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Return the configured name for the device
+ * in: (u64) a0=Host address where the name is copied
+ * (u32) a1=Size of the buffer
+ */
+ CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
+
+ /*
+ * Enable group interrupt for the VF
+ * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * a0 = GRPINTR_DISABLE : disable
+ * a0 = GRPINTR_UPD_VECT: update group vector addr
+ * in: (u32) a1 = interrupt group count
+ * in: (u64) a2 = Start of host buffer address for DMAing group
+ * vector bitmap
+ * in: (u64) a3 = Stride between group vectors
+ */
+ CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
+
+ /*
+	 * Set cq array base and size in a list of consecutive wqs and
+ * rqs for a device
+ * in: (u16) a0 = the wq relative index in the device.
+ * -1 indicates skipping wq configuration
+ * in: (u16) a1 = the wcq relative index in the device
+ * in: (u16) a2 = the rq relative index in the device
+ * -1 indicates skipping rq configuration
+ * in: (u16) a3 = the rcq relative index in the device
+ */
+ CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
+
+ /*
+ * Add an advanced filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+	 * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
+ */
+ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
};
/* CMD_ENABLE2 flags */
@@ -520,6 +634,9 @@ enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+ STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state
+ * if seen a failover to the standby happened
+ */
};
enum vnic_devcmd_error {
@@ -558,9 +675,9 @@ enum fwinfo_asic_type {
FWINFO_ASIC_TYPE_UNKNOWN,
FWINFO_ASIC_TYPE_PALO,
FWINFO_ASIC_TYPE_SERENO,
+ FWINFO_ASIC_TYPE_CRUZ,
};
-
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
@@ -595,25 +712,16 @@ struct vnic_devcmd_provinfo {
*/
#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
-#define FILTER_FIELDS_USNIC (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4))
-
-#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4) | \
- FILTER_FIELD_VALID(5))
-
-#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2))
-
#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \
+ FILTER_FIELD_USNIC_ETHTYPE | \
+ FILTER_FIELD_USNIC_PROTO | \
+ FILTER_FIELD_USNIC_ID)
+
struct filter_usnic_id {
u32 flags;
u16 vlan;
@@ -628,10 +736,18 @@ struct filter_usnic_id {
#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \
+ FILTER_FIELD_5TUP_SRC_AD | \
+ FILTER_FIELD_5TUP_DST_AD | \
+ FILTER_FIELD_5TUP_SRC_PT | \
+ FILTER_FIELD_5TUP_DST_PT)
+
/* Enums for the protocol field. */
enum protocol_e {
PROTO_UDP = 0,
PROTO_TCP = 1,
+ PROTO_IPV4 = 2,
+ PROTO_IPV6 = 3
};
struct filter_ipv4_5tuple {
@@ -646,12 +762,78 @@ struct filter_ipv4_5tuple {
#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \
+ FILTER_FIELD_VMQ_MAC)
+
+#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
+
struct filter_mac_vlan {
u32 flags;
u16 vlan;
u8 mac_addr[6];
} __attribute__((packed));
+#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \
+ FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \
+ FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
+
+struct filter_vlan_ip_3tuple {
+ u32 flags;
+ u16 vlan;
+ u16 l3_protocol;
+ union {
+ u32 dst_addr_v4;
+ u8 dst_addr_v6[16];
+ } u;
+ u32 l4_protocol;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_GENERIC_1_BYTES 64
+
+enum filter_generic_1_layer {
+ FILTER_GENERIC_1_L2,
+ FILTER_GENERIC_1_L3,
+ FILTER_GENERIC_1_L4,
+ FILTER_GENERIC_1_L5,
+ FILTER_GENERIC_1_NUM_LAYERS
+};
+
+#define FILTER_GENERIC_1_IPV4 (1 << 0)
+#define FILTER_GENERIC_1_IPV6 (1 << 1)
+#define FILTER_GENERIC_1_UDP (1 << 2)
+#define FILTER_GENERIC_1_TCP (1 << 3)
+#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4)
+#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5)
+#define FILTER_GENERIC_1_L4SUM_OK (1 << 6)
+#define FILTER_GENERIC_1_IPFRAG (1 << 7)
+
+#define FILTER_GENERIC_1_KEY_LEN 64
+
+/*
+ * Version 1 of generic filter specification
+ * position is only 16 bits, reserving positions > 64k to be used by firmware
+ */
+struct filter_generic_1 {
+ u16 position; /* lower position comes first */
+ u32 mask_flags;
+ u32 val_flags;
+ u16 mask_vlan;
+ u16 val_vlan;
+ struct {
+ u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care"*/
+ u8 val[FILTER_GENERIC_1_KEY_LEN];
+ } __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
+} __attribute__((packed));
+
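For illustration only (not part of the patch): with filter_generic_1, matching a field means setting the relevant flag in both mask_flags and val_flags and writing the header bytes into the per-layer mask/val arrays, where a 0 mask bit means "don't care". A minimal sketch matching only a UDP destination port (given in network byte order); the helper name is purely hypothetical and <string.h> is assumed for memset/memcpy:

static void example_match_udp_dport(struct filter_v2 *fltr, u16 dport_be)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;

	memset(fltr, 0, sizeof(*fltr));
	fltr->type = FILTER_DPDK_1;

	gp->mask_flags = FILTER_GENERIC_1_UDP;
	gp->val_flags = FILTER_GENERIC_1_UDP;

	/* dst_port occupies bytes 2-3 of the UDP header copied into the L4 slot */
	gp->layer[FILTER_GENERIC_1_L4].mask[2] = 0xff;
	gp->layer[FILTER_GENERIC_1_L4].mask[3] = 0xff;
	memcpy(&gp->layer[FILTER_GENERIC_1_L4].val[2], &dport_be, 2);
}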
/* Specifies the filter_action type. */
enum {
FILTER_ACTION_RQ_STEERING = 0,
@@ -670,6 +852,10 @@ enum filter_type {
FILTER_USNIC_ID = 0,
FILTER_IPV4_5TUPLE = 1,
FILTER_MAC_VLAN = 2,
+ FILTER_VLAN_IP_3TUPLE = 3,
+ FILTER_NVGRE_VMQ = 4,
+ FILTER_USNIC_IP = 5,
+ FILTER_DPDK_1 = 6,
FILTER_MAX
};
@@ -679,6 +865,27 @@ struct filter {
struct filter_usnic_id usnic;
struct filter_ipv4_5tuple ipv4;
struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ } u;
+} __attribute__((packed));
+
+/*
+ * This is a strict superset of "struct filter" and exists only
+ * because many drivers use "sizeof (struct filter)" in deciding TLV size.
+ * This new, larger struct filter would cause any code that uses that method
+ * to not work with older firmware, so we add filter_v2 to hold the
+ * new filter types. Drivers should use vnic_filter_size() to determine
+ * the TLV size instead of sizeof (struct filter_v2) to guard against future
+ * growth.
+ */
+struct filter_v2 {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ struct filter_generic_1 generic_1;
} u;
} __attribute__((packed));
@@ -687,14 +894,55 @@ enum {
CLSF_TLV_ACTION = 1,
};
-#define FILTER_MAX_BUF_SIZE 100 /* Maximum size of buffer to CMD_ADD_FILTER */
-
struct filter_tlv {
- uint32_t type;
- uint32_t length;
- uint32_t val[0];
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
};
+/* Data for CMD_ADD_FILTER is 2 TLVs plus the filter and action structs */
+#define FILTER_MAX_BUF_SIZE 100
+#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
+ sizeof(struct filter_action) + \
+ (2 * sizeof(struct filter_tlv)))
+
+/*
+ * Compute actual structure size given filter type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_v2)" when
+ * computing length for TLV.
+ */
+static inline u_int32_t
+vnic_filter_size(struct filter_v2 *fp)
+{
+ u_int32_t size;
+
+ switch (fp->type) {
+ case FILTER_USNIC_ID:
+ size = sizeof(fp->u.usnic);
+ break;
+ case FILTER_IPV4_5TUPLE:
+ size = sizeof(fp->u.ipv4);
+ break;
+ case FILTER_MAC_VLAN:
+ case FILTER_NVGRE_VMQ:
+ size = sizeof(fp->u.mac_vlan);
+ break;
+ case FILTER_VLAN_IP_3TUPLE:
+ size = sizeof(fp->u.vlan_3tuple);
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ size = sizeof(fp->u.generic_1);
+ break;
+ default:
+ size = sizeof(fp->u);
+ break;
+ }
+ size += sizeof(fp->type);
+ return size;
+}
+
+
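As a usage note (not part of the patch): the classifier code is expected to size the CMD_ADD_FILTER buffer from vnic_filter_size() plus the TLV and action overhead rather than from sizeof(struct filter_v2). A rough sketch, with struct filter_action taken from this same header:

static u_int32_t example_clsf_buf_len(struct filter_v2 *fltr)
{
	/* two TLV headers + the type-dependent filter body + the action;
	 * bounded above by FILTER_V2_MAX_BUF_SIZE for any filter type
	 */
	return (2 * sizeof(struct filter_tlv)) +
	       vnic_filter_size(fltr) +
	       sizeof(struct filter_action);
}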
enum {
CLSF_ADD = 0,
CLSF_DEL = 1,
@@ -766,8 +1014,30 @@ typedef enum {
OVERLAY_FEATURE_MAX,
} overlay_feature_t;
-#define OVERLAY_OFFLOAD_ENABLE 0
-#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE_V2 2
#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/*
+ * Use this enum to get the supported versions for each of these features
+ * If you need to use the devcmd_get_supported_feature_version(), add
+ * the new feature into this enum and install a function handler in devcmd.c
+ */
+typedef enum {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_MAX,
+} vic_feature_t;
+
+/*
+ * CMD_CONFIG_GRPINTR subcommands
+ */
+typedef enum {
+ GRPINTR_ENABLE = 1,
+ GRPINTR_DISABLE,
+ GRPINTR_UPD_VECT,
+} grpintr_subcmd_t;
+
#endif /* _VNIC_DEVCMD_H_ */
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 2d9104c4..f3fd39f7 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -97,6 +97,7 @@ struct vnic_rq {
struct rte_mbuf *pkt_first_seg;
struct rte_mbuf *pkt_last_seg;
unsigned int max_mbufs_per_pkt;
+ uint16_t tot_nb_desc;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 8f12b435..865cd76e 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -92,6 +92,11 @@ struct enic_fdir {
struct rte_eth_fdir_stats stats;
struct rte_hash *hash;
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+ u32 modes;
+ u32 types_mask;
+ void (*copy_fltr_fn)(struct filter_v2 *filt,
+ struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
};
struct enic_soft_stats {
@@ -128,6 +133,7 @@ struct enic {
int link_status;
u8 hw_ip_checksum;
u16 max_mtu;
+ u16 adv_filters;
unsigned int flags;
unsigned int priv_flags;
@@ -160,6 +166,7 @@ struct enic {
/* linked list storing memory allocations */
LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list;
rte_spinlock_t memzone_list_lock;
+ rte_spinlock_t mtu_lock;
};
@@ -169,14 +176,22 @@ static inline unsigned int enic_sop_rq_idx_to_cq_idx(unsigned int sop_idx)
return sop_idx / 2;
}
-static inline unsigned int enic_sop_rq(unsigned int rq)
+/* Get the RTE RQ index from a Start of Packet (SOP) RQ index */
+static inline unsigned int enic_sop_rq_idx_to_rte_idx(unsigned int sop_idx)
{
- return rq * 2;
+ return sop_idx / 2;
+}
+
+/* Get the Start of Packet (SOP) RQ index from an RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_sop_idx(unsigned int rte_idx)
+{
+ return rte_idx * 2;
}
-static inline unsigned int enic_data_rq(unsigned int rq)
+/* Get the Data RQ index from an RTE RQ index */
+static inline unsigned int enic_rte_rq_idx_to_data_idx(unsigned int rte_idx)
{
- return rq * 2 + 1;
+ return rte_idx * 2 + 1;
}
static inline unsigned int enic_vnic_rq_count(struct enic *enic)
@@ -276,7 +291,18 @@ extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts);
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+int enic_link_update(struct enic *enic);
+void enic_fdir_info(struct enic *enic);
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
+void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
+ __rte_unused struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 111b1942..bcf479ac 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -38,6 +38,11 @@
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_eth_ctrl.h>
#include "enic_compat.h"
#include "enic.h"
@@ -67,6 +72,262 @@ void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
*stats = enic->fdir.stats;
}
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
+{
+ info->mode = (enum rte_fdir_mode)enic->fdir.modes;
+ info->flow_types_mask[0] = enic->fdir.types_mask;
+}
+
+void enic_fdir_info(struct enic *enic)
+{
+ enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ if (enic->adv_filters) {
+ enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ enic->fdir.copy_fltr_fn = copy_fltr_v2;
+ } else {
+ enic->fdir.copy_fltr_fn = copy_fltr_v1;
+ }
+}
+
+static void
+enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
+ enum filter_generic_1_layer layer, void *mask, void *val,
+ unsigned int len)
+{
+ gp->mask_flags |= flag;
+ gp->val_flags |= gp->mask_flags;
+ memcpy(gp->layer[layer].mask, mask, len);
+ memcpy(gp->layer[layer].val, val, len);
+}
+
+/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
+ * without advanced filter support).
+ */
+void
+copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks)
+{
+ fltr->type = FILTER_IPV4_5TUPLE;
+ fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.src_ip);
+ fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.dst_ip);
+ fltr->u.ipv4.src_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.src_port);
+ fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.dst_port);
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+ fltr->u.ipv4.protocol = PROTO_TCP;
+ else
+ fltr->u.ipv4.protocol = PROTO_UDP;
+
+ fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+}
+
+/* Copy Flow Director filter to a VIC generic filter (requires advanced
+ * filter support).
+ */
+void
+copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks)
+{
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+ int i;
+
+ fltr->type = FILTER_DPDK_1;
+ memset(gp, 0, sizeof(*gp));
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp4_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp4_flow.src_port;
+ }
+ if (input->flow.udp4_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp4_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp4_flow.src_port;
+ }
+ if (input->flow.tcp4_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp4_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp4_flow.src_port;
+ }
+ if (input->flow.sctp4_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
+ }
+ if (input->flow.sctp4_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp4_flow.verify_tag;
+ }
+
+ /* v4 proto should be 132, override ip4_flow.proto */
+ input->flow.ip4_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
+ struct ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+
+ if (input->flow.ip4_flow.tos) {
+ ip4_mask.type_of_service = 0xff;
+ ip4_val.type_of_service = input->flow.ip4_flow.tos;
+ }
+ if (input->flow.ip4_flow.ttl) {
+ ip4_mask.time_to_live = 0xff;
+ ip4_val.time_to_live = input->flow.ip4_flow.ttl;
+ }
+ if (input->flow.ip4_flow.proto) {
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ }
+ if (input->flow.ip4_flow.src_ip) {
+ ip4_mask.src_addr = masks->ipv4_mask.src_ip;
+ ip4_val.src_addr = input->flow.ip4_flow.src_ip;
+ }
+ if (input->flow.ip4_flow.dst_ip) {
+ ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
+ ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
+ &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp6_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp6_flow.src_port;
+ }
+ if (input->flow.udp6_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp6_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp6_flow.src_port;
+ }
+ if (input->flow.tcp6_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp6_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp6_flow.src_port;
+ }
+ if (input->flow.sctp6_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
+ }
+ if (input->flow.sctp6_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp6_flow.verify_tag;
+ }
+
+ /* v6 proto should be 132, override ipv6_flow.proto */
+ input->flow.ipv6_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
+ struct ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+
+ if (input->flow.ipv6_flow.proto) {
+ ipv6_mask.proto = 0xff;
+ ipv6_val.proto = input->flow.ipv6_flow.proto;
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
+ masks->ipv6_mask.src_ip[i];
+ *(uint32_t *)&ipv6_val.src_addr[i * 4] =
+ input->flow.ipv6_flow.src_ip[i];
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
+ masks->ipv6_mask.dst_ip[i];
+ *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
+ input->flow.ipv6_flow.dst_ip[i];
+ }
+ if (input->flow.ipv6_flow.tc) {
+ ipv6_mask.vtc_flow = 0x00ff0000;
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
+ }
+ if (input->flow.ipv6_flow.hop_limits) {
+ ipv6_mask.hop_limits = 0xff;
+ ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
+ &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ }
+}
+
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
int32_t pos;
@@ -97,7 +358,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
struct enic_fdir_node *key;
- struct filter fltr = {0};
+ struct filter_v2 fltr;
int32_t pos;
u8 do_free = 0;
u16 old_fltr_id = 0;
@@ -105,9 +366,9 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
u16 flex_bytes;
u16 queue;
- flowtype_supported = (
- (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
- (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));
+ memset(&fltr, 0, sizeof(fltr));
+ flowtype_supported = enic->fdir.types_mask
+ & (1 << params->input.flow_type);
flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
(params->input.flow_ext.flexbytes[0] & 0xFF));
@@ -121,7 +382,10 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
}
/* Get the enicpmd RQ from the DPDK Rx queue */
- queue = enic_sop_rq(params->action.rx_queue);
+ queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);
+
+ if (!enic->rq[queue].in_use)
+ return -EINVAL;
/* See if the key is already there in the table */
pos = rte_hash_del_key(enic->fdir.hash, params);
@@ -185,22 +449,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
key->filter = *params;
key->rq_index = queue;
- fltr.type = FILTER_IPV4_5TUPLE;
- fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.src_ip);
- fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.dst_ip);
- fltr.u.ipv4.src_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.src_port);
- fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.dst_port);
-
- if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
- fltr.u.ipv4.protocol = PROTO_TCP;
- else
- fltr.u.ipv4.protocol = PROTO_UDP;
-
- fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
key->fltr_id = queue;
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 04e7ba8d..2b154ec2 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -95,10 +95,12 @@ enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_INFO:
dev_warning(enic, "unsupported operation %u", filter_op);
ret = -ENOTSUP;
break;
+ case RTE_ETH_FILTER_INFO:
+ enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+ break;
default:
dev_err(enic, "unknown operation %u", filter_op);
ret = -EINVAL;
@@ -152,7 +154,7 @@ static int enicpmd_dev_setup_intr(struct enic *enic)
return 0;
/* check start of packet (SOP) RQs only in case scatter is disabled. */
for (index = 0; index < enic->rq_count; index++) {
- if (!enic->rq[enic_sop_rq(index)].ctrl)
+ if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
break;
}
if (enic->rq_count != index)
@@ -260,6 +262,35 @@ static void enicpmd_dev_rx_queue_release(void *rxq)
enic_free_rq(rxq);
}
+static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id)
+{
+ struct enic *enic = pmd_priv(dev);
+ uint32_t queue_count = 0;
+ struct vnic_cq *cq;
+ uint32_t cq_tail;
+ uint16_t cq_idx;
+ int rq_num;
+
+ if (rx_queue_id >= dev->data->nb_rx_queues) {
+ dev_err(enic, "Invalid RX queue id=%d", rx_queue_id);
+ return 0;
+ }
+
+ rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+ cq = &enic->cq[enic_cq_rq(enic, rq_num)];
+ cq_idx = cq->to_clean;
+
+ cq_tail = ioread32(&cq->ctrl->cq_tail);
+
+ if (cq_tail < cq_idx)
+ cq_tail += cq->ring.desc_count;
+
+ queue_count = cq_tail - cq_idx;
+
+ return queue_count;
+}
+
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t queue_idx,
uint16_t nb_desc,
@@ -282,7 +313,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
}
eth_dev->data->rx_queues[queue_idx] =
- (void *)&enic->rq[enic_sop_rq(queue_idx)];
+ (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
rx_conf->rx_free_thresh);
@@ -400,17 +431,9 @@ static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
__rte_unused int wait_to_complete)
{
struct enic *enic = pmd_priv(eth_dev);
- int ret;
- int link_status = 0;
ENICPMD_FUNC_TRACE();
- link_status = enic_get_link_status(enic);
- ret = (link_status == enic->link_status);
- enic->link_status = link_status;
- eth_dev->data->dev_link.link_status = link_status;
- eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
- return ret;
+ return enic_link_update(enic);
}
static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
@@ -460,6 +483,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_L4_TCP,
@@ -564,7 +589,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.tx_queue_stop = enicpmd_dev_tx_queue_stop,
.rx_queue_setup = enicpmd_dev_rx_queue_setup,
.rx_queue_release = enicpmd_dev_rx_queue_release,
- .rx_queue_count = NULL,
+ .rx_queue_count = enicpmd_dev_rx_queue_count,
.rx_descriptor_done = NULL,
.tx_queue_setup = enicpmd_dev_tx_queue_setup,
.tx_queue_release = enicpmd_dev_tx_queue_release,
@@ -609,32 +634,14 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_enic_pmd = {
.pci_drv = {
- .name = "rte_enic_pmd",
.id_table = pci_id_enic_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_enicpmd_dev_init,
.dev_private_size = sizeof(struct enic),
};
-/* Driver initialization routine.
- * Invoked once at EAL init time.
- * Register as the [Poll Mode] Driver of Cisco ENIC device.
- */
-static int
-rte_enic_pmd_init(__rte_unused const char *name,
- __rte_unused const char *params)
-{
- ENICPMD_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_enic_pmd);
- return 0;
-}
-
-static struct rte_driver rte_enic_driver = {
- .type = PMD_PDEV,
- .init = rte_enic_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_enic_driver, enic);
-DRIVER_REGISTER_PCI_TABLE(enic, pci_id_enic_map);
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 7549c12e..f0b15ac1 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -240,14 +240,14 @@ void enic_init_vnic_resources(struct enic *enic)
struct vnic_rq *data_rq;
for (index = 0; index < enic->rq_count; index++) {
- cq_idx = enic_cq_rq(enic, enic_sop_rq(index));
+ cq_idx = enic_cq_rq(enic, enic_rte_rq_idx_to_sop_idx(index));
- vnic_rq_init(&enic->rq[enic_sop_rq(index)],
+ vnic_rq_init(&enic->rq[enic_rte_rq_idx_to_sop_idx(index)],
cq_idx,
error_interrupt_enable,
error_interrupt_offset);
- data_rq = &enic->rq[enic_data_rq(index)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(index)];
if (data_rq->in_use)
vnic_rq_init(data_rq,
cq_idx,
@@ -410,14 +410,32 @@ enic_free_consistent(void *priv,
rte_free(mze);
}
+int enic_link_update(struct enic *enic)
+{
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ int ret;
+ int link_status = 0;
+
+ link_status = enic_get_link_status(enic);
+ ret = (link_status == enic->link_status);
+ enic->link_status = link_status;
+ eth_dev->data->dev_link.link_status = link_status;
+ eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
+ return ret;
+}
+
static void
enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
void *arg)
{
- struct enic *enic = pmd_priv((struct rte_eth_dev *)arg);
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
+ struct enic *enic = pmd_priv(dev);
vnic_intr_return_all_credits(&enic->intr);
+ enic_link_update(enic);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
enic_log_q_error(enic);
}
@@ -429,7 +447,13 @@ int enic_enable(struct enic *enic)
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
+ /* vnic notification of link status has already been turned on in
+ * enic_dev_init(), which is called at probe time. Here we are
+ * just turning on interrupt vector 0 if needed.
+ */
+ if (eth_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, 0);
if (enic_clsf_init(enic))
dev_warning(enic, "Init of hash table for clsf failed."\
@@ -437,17 +461,17 @@ int enic_enable(struct enic *enic)
for (index = 0; index < enic->rq_count; index++) {
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_sop_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
if (err) {
dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
return err;
}
err = enic_alloc_rx_queue_mbufs(enic,
- &enic->rq[enic_data_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_data_idx(index)]);
if (err) {
/* release the allocated mbufs for the sop rq*/
enic_rxmbuf_queue_release(enic,
- &enic->rq[enic_sop_rq(index)]);
+ &enic->rq[enic_rte_rq_idx_to_sop_idx(index)]);
dev_err(enic, "Failed to alloc data RX queue mbufs\n");
return err;
@@ -517,6 +541,9 @@ void enic_free_rq(void *rxq)
vnic_rq_free(rq_data);
vnic_cq_free(&enic->cq[enic_sop_rq_idx_to_cq_idx(rq_sop->index)]);
+
+ rq_sop->in_use = 0;
+ rq_data->in_use = 0;
}
void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -541,8 +568,10 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
void enic_start_rq(struct enic *enic, uint16_t queue_idx)
{
- struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
struct rte_eth_dev *eth_dev = enic->rte_dev;
if (rq_data->in_use)
@@ -556,8 +585,10 @@ int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
{
int ret1 = 0, ret2 = 0;
struct rte_eth_dev *eth_dev = enic->rte_dev;
- struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)];
- struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+ struct vnic_rq *rq_sop;
+ struct vnic_rq *rq_data;
+ rq_sop = &enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
+ rq_data = &enic->rq[rq_sop->data_queue_idx];
ret2 = vnic_rq_disable(rq_sop);
rte_mb();
@@ -578,13 +609,14 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
uint16_t nb_desc, uint16_t free_thresh)
{
int rc;
- uint16_t sop_queue_idx = enic_sop_rq(queue_idx);
- uint16_t data_queue_idx = enic_data_rq(queue_idx);
+ uint16_t sop_queue_idx = enic_rte_rq_idx_to_sop_idx(queue_idx);
+ uint16_t data_queue_idx = enic_rte_rq_idx_to_data_idx(queue_idx);
struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
+ uint16_t mtu = enic->rte_dev->data->mtu;
rq_sop->is_sop = 1;
rq_sop->data_queue_idx = data_queue_idx;
@@ -604,9 +636,9 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
RTE_PKTMBUF_HEADROOM);
if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
- dev_info(enic, "Scatter rx mode enabled\n");
+ dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */
- mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) +
+ mbufs_per_pkt = ((mtu + ETHER_HDR_LEN + 4) +
(mbuf_size - 1)) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
@@ -657,7 +689,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
}
if (mbufs_per_pkt > 1) {
dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
- enic->config.mtu, mbuf_size, min_sop + min_data,
+ mtu, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -708,6 +740,8 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
goto err_free_sop_mbuf;
}
+ rq_sop->tot_nb_desc = nb_desc; /* squirrel away for the MTU update function */
+
return 0;
err_free_sop_mbuf:
@@ -804,6 +838,10 @@ int enic_disable(struct enic *enic)
vnic_intr_mask(&enic->intr);
(void)vnic_intr_masked(&enic->intr); /* flush write */
+ rte_intr_disable(&enic->pdev->intr_handle);
+ rte_intr_callback_unregister(&enic->pdev->intr_handle,
+ enic_intr_handler,
+ (void *)enic->rte_dev);
vnic_dev_disable(enic->vdev);
@@ -825,8 +863,14 @@ int enic_disable(struct enic *enic)
}
}
+ /* If we were using interrupts, set the interrupt vector to -1
+ * to disable interrupts. We are not disabling link notifications,
+ * though, as we want the polling of link status to continue working.
+ */
+ if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
+ vnic_dev_notify_set(enic->vdev, -1);
+
vnic_dev_set_reset_flag(enic->vdev, 1);
- vnic_dev_notify_unset(enic->vdev);
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
@@ -928,7 +972,7 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
for (i = 0; i < (1 << rss_hash_bits); i++)
(*rss_cpu_buf_va).cpu[i / 4].b[i % 4] =
- enic_sop_rq(i % enic->rq_count);
+ enic_rte_rq_idx_to_sop_idx(i % enic->rq_count);
err = enic_set_rss_cpu(enic,
rss_cpu_buf_pa,
@@ -1028,6 +1072,9 @@ static void enic_dev_deinit(struct enic *enic)
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
+ /* stop link status checking */
+ vnic_dev_notify_unset(enic->vdev);
+
rte_free(eth_dev->data->mac_addrs);
}
@@ -1069,6 +1116,56 @@ int enic_set_vnic_res(struct enic *enic)
return rc;
}
+/* Reinitialize the RQs and their completion queue for a DPDK Rx queue */
+static int
+enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
+{
+ struct vnic_rq *sop_rq, *data_rq;
+ unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+ int rc = 0;
+
+ sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+
+ vnic_cq_clean(&enic->cq[cq_idx]);
+ vnic_cq_init(&enic->cq[cq_idx],
+ 0 /* flow_control_enable */,
+ 1 /* color_enable */,
+ 0 /* cq_head */,
+ 0 /* cq_tail */,
+ 1 /* cq_tail_color */,
+ 0 /* interrupt_enable */,
+ 1 /* cq_entry_enable */,
+ 0 /* cq_message_enable */,
+ 0 /* interrupt offset */,
+ 0 /* cq_message_addr */);
+
+
+ vnic_rq_init_start(sop_rq, enic_cq_rq(enic,
+ enic_rte_rq_idx_to_sop_idx(rq_idx)), 0,
+ sop_rq->ring.desc_count - 1, 1, 0);
+ if (data_rq->in_use) {
+ vnic_rq_init_start(data_rq,
+ enic_cq_rq(enic,
+ enic_rte_rq_idx_to_data_idx(rq_idx)), 0,
+ data_rq->ring.desc_count - 1, 1, 0);
+ }
+
+ rc = enic_alloc_rx_queue_mbufs(enic, sop_rq);
+ if (rc)
+ return rc;
+
+ if (data_rq->in_use) {
+ rc = enic_alloc_rx_queue_mbufs(enic, data_rq);
+ if (rc) {
+ enic_rxmbuf_queue_release(enic, sop_rq);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
/* The Cisco NIC can send and receive packets up to a max packet size
* determined by the NIC type and firmware. There is also an MTU
* configured into the NIC via the CIMC/UCSM management interface
@@ -1078,6 +1175,9 @@ int enic_set_vnic_res(struct enic *enic)
*/
int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
{
+ unsigned int rq_idx;
+ struct vnic_rq *rq;
+ int rc = 0;
uint16_t old_mtu; /* previous setting */
uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */
struct rte_eth_dev *eth_dev = enic->rte_dev;
@@ -1085,10 +1185,6 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
old_mtu = eth_dev->data->mtu;
config_mtu = enic->config.mtu;
- /* only works with Rx scatter disabled */
- if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter)
- return -ENOTSUP;
-
if (new_mtu > enic->max_mtu) {
dev_err(enic,
"MTU not updated: requested (%u) greater than max (%u)\n",
@@ -1106,11 +1202,83 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
+ /* The easy case is when scatter is disabled. However if the MTU
+ * becomes greater than the mbuf data size, packet drops will ensue.
+ */
+ if (!enic->rte_dev->data->dev_conf.rxmode.enable_scatter) {
+ eth_dev->data->mtu = new_mtu;
+ goto set_mtu_done;
+ }
+
+ * Rx scatter is enabled, so reconfigure RQs on the fly. The point is to
+ * change Rx scatter mode if necessary for better performance. I.e. if
+ * MTU was greater than the mbuf size and now it's less, scatter Rx
+ * doesn't have to be used and vice versa.
+ */
+ rte_spinlock_lock(&enic->mtu_lock);
+
+ /* Stop traffic on all RQs */
+ for (rq_idx = 0; rq_idx < enic->rq_count * 2; rq_idx++) {
+ rq = &enic->rq[rq_idx];
+ if (rq->is_sop && rq->in_use) {
+ rc = enic_stop_rq(enic,
+ enic_sop_rq_idx_to_rte_idx(rq_idx));
+ if (rc) {
+ dev_err(enic, "Failed to stop Rq %u\n", rq_idx);
+ goto set_mtu_done;
+ }
+ }
+ }
+
+ /* replace Rx function with a no-op to avoid getting stale pkts */
+ eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
+ rte_mb();
+
+ /* Allow time for threads to exit the real Rx function. */
+ usleep(100000);
+
+ /* now it is safe to reconfigure the RQs */
+
/* update the mtu */
eth_dev->data->mtu = new_mtu;
+ /* free and reallocate RQs with the new MTU */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+
+ enic_free_rq(rq);
+ rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp,
+ rq->tot_nb_desc, rq->rx_free_thresh);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU alloc error- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+
+ rc = enic_reinit_rq(enic, rq_idx);
+ if (rc) {
+ dev_err(enic,
+ "Fatal MTU RQ reinit- No traffic will pass\n");
+ goto set_mtu_done;
+ }
+ }
+
+ /* put back the real receive function */
+ rte_mb();
+ eth_dev->rx_pkt_burst = enic_recv_pkts;
+ rte_mb();
+
+ /* restart Rx traffic */
+ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) {
+ rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
+ if (rq->is_sop && rq->in_use)
+ enic_start_rq(enic, rq_idx);
+ }
+
+set_mtu_done:
dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu);
- return 0;
+ rte_spinlock_unlock(&enic->mtu_lock);
+ return rc;
}
static int enic_dev_init(struct enic *enic)
@@ -1137,6 +1305,9 @@ static int enic_dev_init(struct enic *enic)
return -EINVAL;
}
+ /* Get the supported filters */
+ enic_fdir_info(enic);
+
eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
@@ -1147,6 +1318,9 @@ static int enic_dev_init(struct enic *enic)
vnic_dev_set_reset_flag(enic->vdev, 0);
+ /* set up link status checking */
+ vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
+
return 0;
}
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 84c5d336..8a230a16 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -62,6 +62,7 @@ int enic_get_vnic_config(struct enic *enic)
return err;
}
+
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(enic->vdev, \
@@ -98,6 +99,10 @@ int enic_get_vnic_config(struct enic *enic)
enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
max_t(u16, ENIC_MIN_MTU, c->mtu));
+ enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+ dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
+ ? "" : "not "));
+
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index ad596136..f762a26c 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -149,30 +149,18 @@ enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
uint8_t cqrd_flags = cqrd->flags;
static const uint32_t cq_type_table[128] __rte_cache_aligned = {
[0x00] = RTE_PTYPE_UNKNOWN,
- [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_NONFRAG,
- [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_FRAG,
- [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_NONFRAG,
- [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
- [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_FRAG,
- [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_UDP,
- [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN
- | RTE_PTYPE_L4_TCP,
+ [0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
+ [0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
+ [0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
+ [0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
+ [0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
/* All others reserved */
};
cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
@@ -185,9 +173,10 @@ static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
- uint16_t ciflags, bwflags, pkt_flags = 0;
+ uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
ciflags = enic_cq_rx_desc_ciflags(cqrd);
bwflags = enic_cq_rx_desc_bwflags(cqrd);
+ vlan_tci = enic_cq_rx_desc_vlan(cqrd);
mbuf->ol_flags = 0;
@@ -195,13 +184,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
goto mbuf_flags_done;
- /* VLAN stripping */
+ /* VLAN STRIPPED flag. The L2 packet type updated here also */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
- mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
- mbuf->vlan_tci = 0;
+ if (vlan_tci != 0)
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
+ else
+ mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
}
+ mbuf->vlan_tci = vlan_tci;
/* RSS flag */
if (enic_cq_rx_desc_rss_type(cqrd)) {
@@ -227,6 +220,17 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
mbuf->ol_flags = pkt_flags;
}
+/* dummy receive function to replace actual function in
+ * order to do safe reconfiguration operations.
+ */
+uint16_t
+enic_dummy_recv_pkts(__rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 35cbe086..923690c0 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -677,7 +677,7 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
/* Enable use of FTAG bit in TX descriptor, PFVTCTL
* register is read-only for VF.
*/
- if (fm10k_check_ftag(dev->pci_dev->devargs)) {
+ if (fm10k_check_ftag(dev->pci_dev->device.devargs)) {
if (hw->mac.type == fm10k_mac_pf) {
FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
@@ -2737,7 +2737,7 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
int use_sse = 1;
uint16_t tx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->devargs))
+ if (fm10k_check_ftag(dev->pci_dev->device.devargs))
tx_ftag_en = 1;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2768,7 +2768,7 @@ fm10k_set_rx_function(struct rte_eth_dev *dev)
uint16_t i, rx_using_sse;
uint16_t rx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->devargs))
+ if (fm10k_check_ftag(dev->pci_dev->device.devargs))
rx_ftag_en = 1;
/* In order to allow Vector Rx there are a few configuration
@@ -3061,34 +3061,16 @@ static const struct rte_pci_id pci_id_fm10k_map[] = {
static struct eth_driver rte_pmd_fm10k = {
.pci_drv = {
- .name = "rte_pmd_fm10k",
.id_table = pci_id_fm10k_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_fm10k_dev_init,
.eth_dev_uninit = eth_fm10k_dev_uninit,
.dev_private_size = sizeof(struct fm10k_adapter),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI FM10K devices.
- */
-static int
-rte_pmd_fm10k_init(__rte_unused const char *name,
- __rte_unused const char *params)
-{
- PMD_INIT_FUNC_TRACE();
- rte_eth_driver_register(&rte_pmd_fm10k);
- return 0;
-}
-
-static struct rte_driver rte_fm10k_driver = {
- .type = PMD_PDEV,
- .init = rte_pmd_fm10k_init,
-};
-
-PMD_REGISTER_DRIVER(rte_fm10k_driver, fm10k);
-DRIVER_REGISTER_PCI_TABLE(fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index bf5888b0..32cc7ff9 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -101,11 +101,15 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
if (unlikely((d->d.staterr &
(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
}
uint16_t
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index c9a49e36..27f3e43f 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -67,6 +67,8 @@ fm10k_reset_tx_queue(struct fm10k_tx_queue *txq);
#define RXEFLAG_SHIFT (13)
/* IPE/L4E flag shift */
#define L3L4EFLAG_SHIFT (14)
+/* shift PKT_RX_L4_CKSUM_GOOD into one byte by 1 bit */
+#define CKSUM_SHIFT (1)
static inline void
fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
@@ -92,11 +94,18 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
0x0000, 0x0000, 0x0000, 0x0000,
0x0001, 0x0001, 0x0001, 0x0001);
+ /* mask the lower byte of ol_flags */
+ const __m128i ol_flags_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
const __m128i l3l4cksum_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD, PKT_RX_L4_CKSUM_BAD, 0);
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> CKSUM_SHIFT,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> CKSUM_SHIFT);
const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
@@ -139,6 +148,10 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
/* Process L4/L3 checksum error flags */
cksumflag = _mm_srli_epi16(cksumflag, L3L4EFLAG_SHIFT);
cksumflag = _mm_shuffle_epi8(l3l4cksum_flag, cksumflag);
+
+ /* clean the higher byte and shift back the flag bits */
+ cksumflag = _mm_and_si128(cksumflag, ol_flags_msk);
+ cksumflag = _mm_slli_epi16(cksumflag, CKSUM_SHIFT);
vtag1 = _mm_or_si128(cksumflag, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
@@ -234,11 +247,8 @@ fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /* - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
+ /* no header split support */
+ if (rxmode->header_split == 1)
return -1;
return 0;
@@ -406,7 +416,7 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
rxdp = rxq->hw_ring + next_dd;
- _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+ rte_prefetch0(rxdp);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 53fe145f..13085fb7 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -97,14 +97,18 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec.c
+ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
+endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
-CFLAGS_i40e_rxtx_vec.o += -msse4.1
+CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
endif
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 2b7a7608..4f067720 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -196,6 +196,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -223,6 +224,9 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
@@ -471,13 +475,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -492,17 +498,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -552,6 +560,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -581,9 +590,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
struct i40e_aqc_set_wol_filter {
__le16 filter_index;
#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
@@ -594,23 +612,29 @@ struct i40e_aqc_set_wol_filter {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
/* Get Wake Reason (0x0121) */
struct i40e_aqc_get_wake_reason_completion {
u8 reserved_1[2];
__le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
u8 reserved_2[12];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-struct i40e_aqc_set_wol_filter_data {
- u8 filter[128];
- u8 mask[16];
-};
-
-I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
-
#endif /* X722_SUPPORT */
/* Switch configuration commands (0x02xx) */
@@ -694,6 +718,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -745,6 +771,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -1644,6 +1671,24 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
+/* Get and set the active HMC resource profile and status.
+ * (direct 0x0500) and (direct 0x0501)
+ */
+struct i40e_aq_get_set_hmc_resource_profile {
+ u8 pm_profile;
+ u8 pe_vf_enabled;
+ u8 reserved[14];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
+
+enum i40e_aq_hmc_profile {
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ I40E_HMC_PROFILE_DEFAULT = 1,
+ I40E_HMC_PROFILE_FAVOR_VF = 2,
+ I40E_HMC_PROFILE_EQUAL = 3,
+};
+
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1691,6 +1736,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1698,7 +1744,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1721,6 +1768,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_LINK_ENABLED 0x08
#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
+#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
+#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
@@ -1731,7 +1780,13 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 mod_type_ext;
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1753,7 +1808,18 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 fec_config;
+#define I40E_AQ_SET_FEC_ABILITY_KR (1 << 0)
+#define I40E_AQ_SET_FEC_ABILITY_RS (1 << 1)
+#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2)
+#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3)
+#define I40E_AQ_SET_FEC_AUTO (1 << 4)
+ u8 reserved;
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1833,16 +1899,26 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0x00
+#define I40E_AQ_25G_NOT_PRESENT 0x01
+#define I40E_AQ_25G_NVM_CRC_ERR 0x02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0x03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0x04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0x05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
+#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01
+#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
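For illustration, the new FEC and power-class fields of a filled-in link-status response could be decoded as below; resp is an assumed local of type struct i40e_aqc_get_link_status, not a name from this patch:

        bool fec_kr_on = !!(resp.config & I40E_AQ_CONFIG_FEC_KR_ENA);
        bool fec_rs_on = !!(resp.config & I40E_AQ_CONFIG_FEC_RS_ENA);
        u8 pwr_class = resp.power_desc & I40E_AQ_PWR_CLASS_MASK; /* maps to I40E_AQ_LINK_POWER_CLASS_1..4 */
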
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 4407f2d3..9a6b3ed6 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -81,7 +81,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_SFP_I_X722:
- case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
#endif
@@ -383,8 +382,7 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
d_buf[j] = buf[i];
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i_sav, d_buf[0], d_buf[1],
- d_buf[2], d_buf[3],
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
d_buf[4], d_buf[5], d_buf[6], d_buf[7],
d_buf[8], d_buf[9], d_buf[10], d_buf[11],
d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
@@ -1191,6 +1189,32 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
}
/**
+ * i40e_get_san_mac_addr - get SAN MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to SAN MAC address
+ *
+ * Reads the adapter's SAN MAC address from NVM
+ **/
+enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
+ u8 *mac_addr)
+{
+ struct i40e_aqc_mac_address_read_data addrs;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+ if (status)
+ return status;
+
+ if (flags & I40E_AQC_SAN_ADDR_VALID)
+ memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ else
+ status = I40E_ERR_INVALID_MAC_ADDR;
+
+ return status;
+}
+
+/**
* i40e_read_pba_string - Reads part number string from EEPROM
* @hw: pointer to hardware structure
* @pba_num: stores the part number string from the EEPROM
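A hedged usage sketch for the new i40e_get_san_mac_addr() helper (the buffer name and fixed six-byte size are assumptions, not part of the patch):

        u8 san_mac[6];
        if (i40e_get_san_mac_addr(hw, san_mac) == I40E_SUCCESS)
                ; /* NVM provided a valid SAN MAC; it can be programmed as an extra address */
        else
                ; /* typically I40E_ERR_INVALID_MAC_ADDR: no SAN address provisioned */
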
@@ -1670,8 +1694,10 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
@@ -2215,6 +2241,34 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
}
/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(0);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -3792,16 +3846,8 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (p->fcoe)
i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n");
-#ifdef I40E_FCOE_ENA
- /* Software override ensuring FCoE is disabled if npar or mfp
- * mode because it is not supported in these modes.
- */
- if (p->npar_enable || p->flex10_enable)
- p->fcoe = false;
-#else
/* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */
p->fcoe = false;
-#endif
/* count the enabled ports (aka the "not disabled" ports) */
hw->num_ports = 0;
@@ -5452,12 +5498,12 @@ STATIC void i40e_fix_up_geneve_vni(
u16 tnl_type;
u32 ti;
- tnl_type = (le16_to_cpu(f[i].flags) &
+ tnl_type = (LE16_TO_CPU(f[i].flags) &
I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
- ti = le32_to_cpu(f[i].tenant_id);
- f[i].tenant_id = cpu_to_le32(ti << 8);
+ ti = LE32_TO_CPU(f[i].tenant_id);
+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
}
}
}
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index ed73e1d2..8bd5793d 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -68,7 +68,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
#define I40E_DEV_ID_SFP_I_X722 0x37D3
-#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 03dda937..3aab5ca9 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -124,6 +124,8 @@ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
@@ -438,6 +440,7 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
u32 pba_num_size);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw);
/* prototype for functions used for NVM access */
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw);
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index 5349419f..b5f72c32 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -157,8 +157,10 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE 0
-#define I40E_MDIO_OPCODE_ADDRESS 0
+#define I40E_MDIO_STCODE I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
@@ -292,46 +294,48 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- u32 phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
@@ -1388,6 +1392,23 @@ struct i40e_veb_tc_stats {
u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS];
};
+/* Statistics collected per function for FCoE */
+struct i40e_fcoe_stats {
+ u64 rx_fcoe_packets; /* fcoeprc */
+ u64 rx_fcoe_dwords; /* fcoedwrc */
+ u64 rx_fcoe_dropped; /* fcoerpdc */
+ u64 tx_fcoe_packets; /* fcoeptc */
+ u64 tx_fcoe_dwords; /* fcoedwtc */
+ u64 fcoe_bad_fccrc; /* fcoecrc */
+ u64 fcoe_last_error; /* fcoelast */
+ u64 fcoe_ddp_count; /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define I40E_FCOE_VF_STAT_OFFSET 0
+#define I40E_FCOE_PF_STAT_OFFSET 128
+#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
+
/* Statistics collected by the MAC */
struct i40e_hw_port_stats {
/* eth stats collected by the port */
@@ -1509,6 +1530,208 @@ struct i40e_hw_port_stats {
#define I40E_SRRD_SRCTL_ATTEMPTS 100000
+/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
+
+enum i40e_fcoe_tx_ctx_desc_cmd_bits {
+ I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */
+ I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10,
+ I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20,
+ I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40,
+ I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80
+};
+
+/* FCoE DIF/DIX Context descriptor */
+struct i40e_fcoe_difdix_context_desc {
+ __le64 flags_buff0_buff1_ref;
+ __le64 difapp_msk_bias;
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0
+#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT)
+
+enum i40e_fcoe_difdix_ctx_desc_flags_bits {
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200,
+ /* 2 BITS */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000,
+ /* 1 BIT */
+ I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800
+};
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22
+#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32
+#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16
+#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT)
+
+#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32
+#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_MASK (0xFFFFFFFFULL << \
+ I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT)
+
+/* FCoE DIF/DIX Buffers descriptor */
+struct i40e_fcoe_difdix_buffers_desc {
+ __le64 buff_addr0;
+ __le64 buff_addr1;
+};
+
+/* FCoE DDP Context descriptor */
+struct i40e_fcoe_ddp_context_desc {
+ __le64 rsvd;
+ __le64 type_cmd_foff_lsize;
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \
+ I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4
+#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+ I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */
+ I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */
+ I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \
+ I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \
+ I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct i40e_fcoe_queue_context_desc {
+ __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */
+ __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)
+
+enum i40e_fcoe_queue_ctx_desc_tph_bits {
+ I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1,
+ I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \
+ I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct i40e_fcoe_filter_context_desc {
+ __le32 param;
+ __le16 seqn;
+
+ /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+ __le16 rsvd_dmaindx;
+
+ /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+ __le64 flags_rsvd_lanq;
+};
+
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \
+ I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum i40e_fcoe_filter_ctx_desc_flags_bits {
+ I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01,
+ I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02,
+ I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00,
+ I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04
+};
+
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \
+ I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \
+ I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \
+ I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
enum i40e_switch_element_types {
I40E_SWITCH_ELEMENT_TYPE_MAC = 1,
I40E_SWITCH_ELEMENT_TYPE_PF = 2,
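The FCoE shift/mask pairs above compose the descriptor quad-words; a minimal sketch of packing QW1 of a DDP context descriptor (cmd, foff, lsize and ddp_desc are assumed inputs, and CPU_TO_LE64 is the osdep byte-order helper used elsewhere in this base code):

        u64 qw1 = ((u64)cmd << I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) &
                  I40E_FCOE_DDP_CTX_QW1_CMD_MASK;
        qw1 |= ((u64)foff << I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) &
               I40E_FCOE_DDP_CTX_QW1_FOFF_MASK;
        qw1 |= ((u64)lsize << I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) &
               I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK;
        ddp_desc->type_cmd_foff_lsize = CPU_TO_LE64(qw1);
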
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 13068cc4..67778baf 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -418,6 +418,7 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
void *arg);
static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
+static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
static void i40e_configure_registers(struct i40e_hw *hw);
static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
@@ -481,7 +482,6 @@ static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) },
- { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_I_X722) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -670,10 +670,11 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
static struct eth_driver rte_i40e_pmd = {
.pci_drv = {
- .name = "rte_i40e_pmd",
.id_table = pci_id_i40e_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_i40e_dev_init,
.eth_dev_uninit = eth_i40e_dev_uninit,
@@ -708,28 +709,8 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_i40e_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- rte_eth_driver_register(&rte_i40e_pmd);
-
- return 0;
-}
-
-static struct rte_driver rte_i40e_driver = {
- .type = PMD_PDEV,
- .init = rte_i40e_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_i40e_driver, i40e);
-DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
@@ -933,8 +914,10 @@ config_floating_veb(struct rte_eth_dev *dev)
memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list));
if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) {
- pf->floating_veb = is_floating_veb_supported(pci_dev->devargs);
- config_vf_floating_veb(pci_dev->devargs, pf->floating_veb,
+ pf->floating_veb =
+ is_floating_veb_supported(pci_dev->device.devargs);
+ config_vf_floating_veb(pci_dev->device.devargs,
+ pf->floating_veb,
pf->floating_veb_list);
} else {
pf->floating_veb = false;
@@ -1042,7 +1025,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
config_floating_veb(dev);
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
-
+ ret = i40e_dev_sync_phy_type(hw);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret);
+ goto err_sync_phy_type;
+ }
/*
* On X710, performance number is far from the expectation on recent
* firmware versions. The fix for this issue may not be integrated in
@@ -1207,6 +1194,7 @@ err_msix_pool_init:
err_qp_pool_init:
err_parameter_init:
err_get_capabilities:
+err_sync_phy_type:
(void)i40e_shutdown_adminq(hw);
return ret;
@@ -1585,6 +1573,8 @@ i40e_parse_link_speeds(uint16_t link_speeds)
if (link_speeds & ETH_LINK_SPEED_40G)
link_speed |= I40E_LINK_SPEED_40GB;
+ if (link_speeds & ETH_LINK_SPEED_25G)
+ link_speed |= I40E_LINK_SPEED_25GB;
if (link_speeds & ETH_LINK_SPEED_20G)
link_speed |= I40E_LINK_SPEED_20GB;
if (link_speeds & ETH_LINK_SPEED_10G)
@@ -1610,6 +1600,7 @@ i40e_phy_conf_link(struct i40e_hw *hw,
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_LOW_POWER;
const uint8_t advt = I40E_LINK_SPEED_40GB |
+ I40E_LINK_SPEED_25GB |
I40E_LINK_SPEED_10GB |
I40E_LINK_SPEED_1GB |
I40E_LINK_SPEED_100MB;
@@ -1662,13 +1653,14 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct rte_eth_conf *conf = &dev->data->dev_conf;
speed = i40e_parse_link_speeds(conf->link_speeds);
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
/* Skip changing speed on 40G interfaces, FW does not support */
- if (i40e_is_40G_device(hw->device_id)) {
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
speed = I40E_LINK_SPEED_UNKNOWN;
abilities |= I40E_AQ_PHY_AN_ENABLED;
}
@@ -1764,7 +1756,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
- ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G)) {
+ ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G)) {
PMD_DRV_LOG(ERR, "Invalid link setting");
goto err_up;
}
@@ -1994,9 +1987,11 @@ static int
i40e_dev_set_link_down(struct rte_eth_dev *dev)
{
uint8_t speed = I40E_LINK_SPEED_UNKNOWN;
- uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ uint8_t abilities = 0;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
return i40e_phy_conf_link(hw, abilities, speed);
}
@@ -2056,6 +2051,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
link.link_speed = ETH_SPEED_NUM_40G;
break;
@@ -2605,7 +2603,11 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO |
+ DEV_TX_OFFLOAD_IPIP_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
sizeof(uint32_t);
dev_info->reta_size = pf->hash_lut_size;
@@ -2655,9 +2657,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues += dev_info->vmdq_queue_num;
}
- if (i40e_is_40G_device(hw->device_id))
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
/* For XL710 */
dev_info->speed_capa = ETH_LINK_SPEED_40G;
+ else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
+ /* For XXV710 */
+ dev_info->speed_capa = ETH_LINK_SPEED_25G;
else
/* For X710 */
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
@@ -2903,7 +2908,7 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
if (err < 0)
return -ENOSYS;
- if (i40e_is_40G_device(hw->device_id)) {
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
/* Configure flow control refresh threshold,
* the value for stat_tx_pause_refresh_timer[8]
* is used for global pause operation.
@@ -5496,7 +5501,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
if (!ret) {
i40e_notify_all_vfs_link_status(dev);
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
break;
default:
@@ -5564,6 +5569,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(INFO, "ICR0: adminq event");
i40e_dev_handle_aq_msg(dev);
}
+
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
@@ -6125,7 +6131,7 @@ DONE:
/* Configure hash enable flags for RSS */
uint64_t
-i40e_config_hena(uint64_t flags)
+i40e_config_hena(uint64_t flags, enum i40e_mac_type type)
{
uint64_t hena = 0;
@@ -6134,20 +6140,42 @@ i40e_config_hena(uint64_t flags)
if (flags & ETH_RSS_FRAG_IPV4)
hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4;
- if (flags & ETH_RSS_NONFRAG_IPV4_TCP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- if (flags & ETH_RSS_NONFRAG_IPV4_UDP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV4_TCP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
+ }
+ if (flags & ETH_RSS_NONFRAG_IPV4_UDP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ }
if (flags & ETH_RSS_NONFRAG_IPV4_SCTP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
if (flags & ETH_RSS_NONFRAG_IPV4_OTHER)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
if (flags & ETH_RSS_FRAG_IPV6)
hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6;
- if (flags & ETH_RSS_NONFRAG_IPV6_TCP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
- if (flags & ETH_RSS_NONFRAG_IPV6_UDP)
- hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ if (flags & ETH_RSS_NONFRAG_IPV6_TCP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ }
+ if (flags & ETH_RSS_NONFRAG_IPV6_UDP) {
+ if (type == I40E_MAC_X722) {
+ hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ } else
+ hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ }
if (flags & ETH_RSS_NONFRAG_IPV6_SCTP)
hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
if (flags & ETH_RSS_NONFRAG_IPV6_OTHER)
@@ -6170,8 +6198,18 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV4;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+#ifdef X722_SUPPORT
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+#ifdef X722_SUPPORT
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
@@ -6180,8 +6218,18 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV6;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+#ifdef X722_SUPPORT
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+#ifdef X722_SUPPORT
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+ if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
@@ -6201,7 +6249,10 @@ i40e_pf_disable_rss(struct i40e_pf *pf)
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
@@ -6288,8 +6339,11 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf)
rss_hf = rss_conf->rss_hf;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
- hena |= i40e_config_hena(rss_hf);
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf, hw->mac.type);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
I40E_WRITE_FLUSH(hw);
@@ -6308,7 +6362,9 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev,
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
- if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (!(hena & ((hw->mac.type == I40E_MAC_X722)
+ ? I40E_RSS_HENA_ALL_X722
+ : I40E_RSS_HENA_ALL))) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -EINVAL;
return 0; /* Nothing to do */
@@ -7045,6 +7101,26 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_FLEX_PAYLOAD,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7054,6 +7130,17 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS |
+ I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL |
+ I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
+ I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7087,6 +7174,26 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7096,6 +7203,17 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_DMAC | I40E_INSET_SMAC |
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC |
+ I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR |
+ I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
+ I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
+ I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
+ I40E_INSET_FLEX_PAYLOAD,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7135,11 +7253,30 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7161,11 +7298,30 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7218,9 +7374,22 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7232,9 +7401,22 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
+ I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7508,8 +7690,14 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
- if (!I40E_VALID_PCTYPE(pctype))
- continue;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!I40E_VALID_PCTYPE_X722(pctype))
+ continue;
+ } else {
+ if (!I40E_VALID_PCTYPE(pctype))
+ continue;
+ }
+
input_set = i40e_get_default_input_set(pctype);
num = i40e_generate_inset_mask_reg(input_set, mask_reg,
@@ -7575,7 +7763,15 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
- pctype = i40e_flowtype_to_pctype(conf->flow_type);
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw,
+ I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype(
+ conf->flow_type)));
+ } else
+ pctype = i40e_flowtype_to_pctype(conf->flow_type);
+
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
if (ret) {
@@ -7644,7 +7840,9 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
PMD_DRV_LOG(ERR, "invalid flow_type input.");
return -EINVAL;
}
+
pctype = i40e_flowtype_to_pctype(conf->flow_type);
+
ret = i40e_parse_input_set(&input_set, pctype, conf->field,
conf->inset_size);
if (ret) {
@@ -8017,8 +8215,18 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
+ RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
@@ -8026,8 +8234,18 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+ [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
+#ifdef X722_SUPPORT
+ [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
+ RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
+#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
@@ -8063,6 +8281,23 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
#define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606
#define I40E_GL_SWR_PM_UP_THR 0x269FBC
+static int
+i40e_dev_sync_phy_type(struct i40e_hw *hw)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp phy_ab;
+ int ret = -ENOTSUP;
+
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab,
+ NULL);
+
+ if (status)
+ return ret;
+
+ return 0;
+}
+
+
static void
i40e_configure_registers(struct i40e_hw *hw)
{
@@ -8080,7 +8315,8 @@ i40e_configure_registers(struct i40e_hw *hw)
for (i = 0; i < RTE_DIM(reg_table); i++) {
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
- if (i40e_is_40G_device(hw->device_id)) /* For XL710 */
+ if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
+ I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
reg_table[i].val =
I40E_GL_SWR_PM_UP_THR_SF_VALUE;
else /* For X710 */
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 61dfa932..298cef48 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -149,6 +149,16 @@ enum i40e_flxpld_layer_idx {
ETH_RSS_NONFRAG_IPV6_OTHER | \
ETH_RSS_L2_PAYLOAD)
+/* All bits of RSS hash enable for X722*/
+#define I40E_RSS_HENA_ALL_X722 ( \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ I40E_RSS_HENA_ALL)
+
/* All bits of RSS hash enable */
#define I40E_RSS_HENA_ALL ( \
(1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
@@ -577,7 +587,7 @@ int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi,
struct i40e_vsi_vlan_pvid_info *info);
int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on);
int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on);
-uint64_t i40e_config_hena(uint64_t flags);
+uint64_t i40e_config_hena(uint64_t flags, enum i40e_mac_type type);
uint64_t i40e_parse_hena(uint64_t flags);
enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf);
enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf);
@@ -701,6 +711,25 @@ i40e_calc_itr_interval(int16_t interval)
(flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \
(flow_type) == RTE_ETH_FLOW_L2_PAYLOAD)
+#define I40E_VALID_PCTYPE_X722(pctype) \
+ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \
+ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
+ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+
#define I40E_VALID_PCTYPE(pctype) \
((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \
@@ -714,4 +743,18 @@ i40e_calc_itr_interval(int16_t interval)
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \
(pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD)
+#define I40E_PHY_TYPE_SUPPORT_40G(phy_type) \
+ (((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_KR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_AOC) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_SR4) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_LR4))
+
+#define I40E_PHY_TYPE_SUPPORT_25G(phy_type) \
+ (((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \
+ ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR))
+
#endif /* _I40E_ETHDEV_H_ */
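Both helper macros test the 64-bit hw->phy.phy_types word that i40e_aq_get_phy_capabilities() now fills in (see the i40e_common.c hunk above); a condensed caller sketch modelled on the dev_info change in this patch:

        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types))
                dev_info->speed_capa = ETH_LINK_SPEED_40G;   /* XL710 */
        else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
                dev_info->speed_capa = ETH_LINK_SPEED_25G;   /* XXV710 */
        else
                dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
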
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index ba63a7f1..aa306d61 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1314,7 +1314,7 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
switch (pf_msg->event) {
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
@@ -1527,38 +1527,18 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
*/
static struct eth_driver rte_i40evf_pmd = {
.pci_drv = {
- .name = "rte_i40evf_pmd",
.id_table = pci_id_i40evf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = i40evf_dev_init,
.eth_dev_uninit = i40evf_dev_uninit,
.dev_private_size = sizeof(struct i40e_adapter),
};
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices.
- */
-static int
-rte_i40evf_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_i40evf_pmd);
-
- return 0;
-}
-
-static struct rte_driver rte_i40evf_driver = {
- .type = PMD_PDEV,
- .init = rte_i40evf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_i40evf_driver, i40evf);
-DRIVER_REGISTER_PCI_TABLE(i40evf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
@@ -2539,8 +2519,11 @@ i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
rss_hf = rss_conf->rss_hf;
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
- hena |= i40e_config_hena(rss_hf);
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
+ hena |= i40e_config_hena(rss_hf, hw->mac.type);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
I40EVF_WRITE_FLUSH(hw);
@@ -2556,7 +2539,10 @@ i40evf_disable_rss(struct i40e_vf *vf)
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- hena &= ~I40E_RSS_HENA_ALL;
+ if (hw->mac.type == I40E_MAC_X722)
+ hena &= ~I40E_RSS_HENA_ALL_X722;
+ else
+ hena &= ~I40E_RSS_HENA_ALL;
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
I40EVF_WRITE_FLUSH(hw);
@@ -2617,7 +2603,9 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
- if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */
+ if (!(hena & ((hw->mac.type == I40E_MAC_X722)
+ ? I40E_RSS_HENA_ALL_X722
+ : I40E_RSS_HENA_ALL))) { /* RSS disabled */
if (rss_hf != 0) /* Enable RSS */
return -EINVAL;
return 0;
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index f65c4110..335bf15c 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -251,7 +251,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->driver->pci_drv.name,
+ eth_dev->driver->pci_drv.driver.name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -353,8 +353,15 @@ i40e_init_flx_pld(struct i40e_pf *pf)
/* initialize the masks */
for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
- if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
- continue;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!I40E_VALID_PCTYPE_X722(
+ (enum i40e_filter_pctype)pctype))
+ continue;
+ } else {
+ if (!I40E_VALID_PCTYPE(
+ (enum i40e_filter_pctype)pctype))
+ continue;
+ }
pf->fdir.flex_mask[pctype].word_mask = 0;
i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
@@ -664,7 +671,16 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
/* configure flex mask*/
for (i = 0; i < conf->nb_flexmasks; i++) {
- pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES(
+ (int)i40e_flowtype_to_pctype(
+ conf->flex_mask[i].flow_type)));
+ } else
+ pctype = i40e_flowtype_to_pctype(
+ conf->flex_mask[i].flow_type);
+
i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
}
@@ -1012,6 +1028,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
@@ -1044,7 +1061,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
return ret;
}
- pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ /* get translated pctype value in fd pctype register */
+ pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
+ hw, I40E_GLQF_FD_PCTYPES(
+ (int)i40e_flowtype_to_pctype(
+ filter->input.flow_type)));
+ } else
+ pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
+
ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
if (ret < 0) {
PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
@@ -1273,6 +1299,7 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
{
struct i40e_fdir_flex_mask *mask;
struct rte_eth_fdir_flex_mask *ptr = flex_mask;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
uint16_t flow_type;
uint8_t i, j;
uint16_t off_bytes, mask_tmp;
@@ -1281,8 +1308,13 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
i++) {
mask = &pf->fdir.flex_mask[i];
- if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
- continue;
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (!I40E_VALID_PCTYPE_X722((enum i40e_filter_pctype)i))
+ continue;
+ } else {
+ if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
+ continue;
+ }
flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 4e2f6b63..ddfc140d 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -904,11 +904,11 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
- dev->data->dev_link.link_status;
+ dev->data->dev_link.link_status;
event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+ (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
void
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 0556a4d4..7ae7d9fb 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -142,8 +142,14 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
return flags;
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ flags |= PKT_RX_IP_CKSUM_GOOD;
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT)))
flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_L4_CKSUM_GOOD;
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT)))
flags |= PKT_RX_EIP_CKSUM_BAD;
@@ -174,569 +180,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword)
}
#endif
-/* For each value it means, datasheet of hardware can tell more details
- *
- * @note: fix i40e_dev_supported_ptypes_get() if any change here.
- */
-static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
-{
- static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
- /* L2 types */
- /* [0] reserved */
- [1] = RTE_PTYPE_L2_ETHER,
- [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
- /* [3] - [5] reserved */
- [6] = RTE_PTYPE_L2_ETHER_LLDP,
- /* [7] - [10] reserved */
- [11] = RTE_PTYPE_L2_ETHER_ARP,
- /* [12] - [21] reserved */
-
- /* Non tunneled IPv4 */
- [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- /* [25] reserved */
- [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* IPv4 --> IPv4 */
- [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [32] reserved */
- [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> IPv6 */
- [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [39] reserved */
- [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN */
- [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
- [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [47] reserved */
- [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
- [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [54] reserved */
- [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
- [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
- [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [62] reserved */
- [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
- [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [69] reserved */
- [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
- [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
- [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [77] reserved */
- [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
- [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [84] reserved */
- [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* Non tunneled IPv6 */
- [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- /* [91] reserved */
- [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* IPv6 --> IPv4 */
- [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [98] reserved */
- [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> IPv6 */
- [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [105] reserved */
- [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_IP |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN */
- [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
- [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [113] reserved */
- [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
- [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [120] reserved */
- [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
- [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
- [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [128] reserved */
- [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
- [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [135] reserved */
- [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
- [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
- [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [143] reserved */
- [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
- [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_FRAG,
- [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_NONFRAG,
- [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_UDP,
- /* [150] reserved */
- [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_TCP,
- [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_SCTP,
- [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_TUNNEL_GRENAT |
- RTE_PTYPE_INNER_L2_ETHER_VLAN |
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_INNER_L4_ICMP,
-
- /* L2 NSH packet type */
- [154] = RTE_PTYPE_L2_ETHER_NSH,
- [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
- [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_FRAG,
- [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_NONFRAG,
- [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_UDP,
- [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_TCP,
- [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_SCTP,
- [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
- RTE_PTYPE_L4_ICMP,
-
- /* All others reserved */
- };
-
- return type_table[ptype];
-}
-
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01
#define I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
@@ -779,33 +222,65 @@ i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
#endif
return flags;
}
+
+static inline void
+i40e_parse_tunneling_params(uint64_t ol_flags,
+ union i40e_tx_offload tx_offload,
+ uint32_t *cd_tunneling)
+{
+ /* EIPT: External (outer) IP header type */
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ /* EIPLEN: External (outer) IP header length, in DWords */
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* L4TUNT: L4 Tunneling Type */
+ switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+ case PKT_TX_TUNNEL_IPIP:
+ /* for non UDP / GRE tunneling, set to 00b */
+ break;
+ case PKT_TX_TUNNEL_VXLAN:
+ case PKT_TX_TUNNEL_GENEVE:
+ *cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
+ break;
+ case PKT_TX_TUNNEL_GRE:
+ *cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
+ break;
+ default:
+ PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+ return;
+ }
+
+ /* L4TUNLEN: L4 Tunneling Length, in Words
+ *
+ * We depend on app to set rte_mbuf.l2_len correctly.
+ * For IP in GRE it should be set to the length of the GRE
+ * header;
+ * for MAC in GRE or MAC in UDP it should be set to the length
+ * of the GRE or UDP headers plus the inner MAC up to including
+ * its last Ethertype.
+ */
+ *cd_tunneling |= (tx_offload.l2_len >> 1) <<
+ I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+}
+
static inline void
i40e_txd_enable_checksum(uint64_t ol_flags,
uint32_t *td_cmd,
uint32_t *td_offset,
- union i40e_tx_offload tx_offload,
- uint32_t *cd_tunneling)
+ union i40e_tx_offload tx_offload)
{
- /* UDP tunneling packet TX checksum offload */
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-
+ /* Set MACLEN */
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
*td_offset |= (tx_offload.outer_l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-
- if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_OUTER_IPV4)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- else if (ol_flags & PKT_TX_OUTER_IPV6)
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-
- /* Now set the ctx descriptor fields */
- *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- (tx_offload.l2_len >> 1) <<
- I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
- } else
+ else
*td_offset |= (tx_offload.l2_len >> 1)
<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -994,6 +469,8 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
I40E_RXD_QW1_STATUS_SHIFT;
}
+ rte_smp_rmb();
+
/* Compute how many status bits were set */
for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
@@ -1484,7 +961,8 @@ i40e_calc_context_desc(uint64_t flags)
{
static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
PKT_TX_TCP_SEG |
- PKT_TX_QINQ_PKT;
+ PKT_TX_QINQ_PKT |
+ PKT_TX_TUNNEL_MASK;
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
@@ -1506,7 +984,7 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
}
/**
- * in case of tunneling packet, the outer_l2_len and
+ * in case of non tunneling packet, the outer_l2_len and
* outer_l3_len must be 0.
*/
hdr_len = tx_offload.outer_l2_len +
@@ -1623,12 +1101,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Always enable CRC offload insertion */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
- /* Enable checksum offloading */
+ /* Fill in tunneling parameters if necessary */
cd_tunneling_params = 0;
- if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) {
- i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- tx_offload, &cd_tunneling_params);
- }
+ if (ol_flags & PKT_TX_TUNNEL_MASK)
+ i40e_parse_tunneling_params(ol_flags, tx_offload,
+ &cd_tunneling_params);
+ /* Enable checksum offloading */
+ if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)
+ i40e_txd_enable_checksum(ol_flags, &td_cmd,
+ &td_offset, tx_offload);
if (nb_ctx) {
/* Setup TX context descriptor if required */
@@ -2136,7 +1617,9 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc ||
#endif
- dev->rx_pkt_burst == i40e_recv_scattered_pkts)
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec)
return ptypes;
return NULL;
}
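The new i40e_parse_tunneling_params() and the PKT_TX_TUNNEL_MASK addition to i40e_calc_context_desc() rely on the application describing the encapsulation in the mbuf, as the L4TUNLEN comment above spells out. A minimal sketch for a VXLAN-encapsulated TCP packet with outer and inner checksum offload, using the DPDK 16.11 header structs; the helper name is illustrative, not part of this patch:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_mbuf.h>

/* For MAC-in-UDP (VXLAN), l2_len must cover the outer UDP + VXLAN
 * headers plus the inner Ethernet header, per the L4TUNLEN comment.
 */
static void
set_vxlan_tx_offload(struct rte_mbuf *m)
{
	m->outer_l2_len = sizeof(struct ether_hdr);
	m->outer_l3_len = sizeof(struct ipv4_hdr);
	m->l2_len = sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr) +
		    sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = sizeof(struct tcp_hdr);

	m->ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}

With these fields set, the tunnel flag forces a context descriptor and the NATLEN/EIPLEN values programmed above come straight from l2_len and outer_l3_len.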
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 98179f00..ecdb13cb 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -255,4 +255,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
void i40e_set_tx_function(struct rte_eth_dev *dev);
+/* For the meaning of each value, the hardware datasheet can tell more details
+ *
+ * @note: fix i40e_dev_supported_ptypes_get() if any change here.
+ */
+static inline uint32_t
+i40e_rxd_pkt_type_mapping(uint8_t ptype)
+{
+ static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
+ /* L2 types */
+ /* [0] reserved */
+ [1] = RTE_PTYPE_L2_ETHER,
+ [2] = RTE_PTYPE_L2_ETHER_TIMESYNC,
+ /* [3] - [5] reserved */
+ [6] = RTE_PTYPE_L2_ETHER_LLDP,
+ /* [7] - [10] reserved */
+ [11] = RTE_PTYPE_L2_ETHER_ARP,
+ /* [12] - [21] reserved */
+
+ /* Non tunneled IPv4 */
+ [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [25] reserved */
+ [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv4 --> IPv4 */
+ [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [32] reserved */
+ [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> IPv6 */
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [39] reserved */
+ [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN */
+ [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */
+ [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [47] reserved */
+ [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */
+ [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [54] reserved */
+ [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC */
+ [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [62] reserved */
+ [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [69] reserved */
+ [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [77] reserved */
+ [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [84] reserved */
+ [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* Non tunneled IPv6 */
+ [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ /* [91] reserved */
+ [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* IPv6 --> IPv4 */
+ [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [98] reserved */
+ [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> IPv6 */
+ [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [105] reserved */
+ [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_IP |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN */
+ [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */
+ [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [113] reserved */
+ [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */
+ [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [120] reserved */
+ [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC */
+ [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */
+ [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [128] reserved */
+ [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */
+ [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [135] reserved */
+ [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */
+ [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */
+ [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [143] reserved */
+ [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */
+ [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_FRAG,
+ [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_UDP,
+ /* [150] reserved */
+ [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_TCP,
+ [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_SCTP,
+ [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_TUNNEL_GRENAT |
+ RTE_PTYPE_INNER_L2_ETHER_VLAN |
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_INNER_L4_ICMP,
+
+ /* L2 NSH packet type */
+ [154] = RTE_PTYPE_L2_ETHER_NSH,
+ [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_FRAG,
+ [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_NONFRAG,
+ [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_UDP,
+ [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_TCP,
+ [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_SCTP,
+ [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+ RTE_PTYPE_L4_ICMP,
+
+ /* All others reserved */
+ };
+
+ return type_table[ptype];
+}
+
#endif /* _I40E_RXTX_H_ */
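The ptype table moved into this header is what fills mbuf->packet_type on both the scalar and vector Rx paths. A small consumer-side sketch using the standard RTE_PTYPE_* masks (the function name is illustrative, not part of this patch):

#include <rte_mbuf.h>

/* Return non-zero for a GRE/Teredo/VXLAN-tunneled packet whose inner
 * payload is IPv4/UDP, based on the bits set from the table above.
 */
static int
is_tunneled_ipv4_udp(const struct rte_mbuf *m)
{
	uint32_t pt = m->packet_type;

	return (pt & RTE_PTYPE_TUNNEL_MASK) == RTE_PTYPE_TUNNEL_GRENAT &&
	       (pt & RTE_PTYPE_INNER_L3_MASK) ==
			RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
	       (pt & RTE_PTYPE_INNER_L4_MASK) == RTE_PTYPE_INNER_L4_UDP;
}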
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
new file mode 100644
index 00000000..6cb5dce9
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -0,0 +1,251 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_RXTX_VEC_COMMON_H_
+#define _I40E_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+ uint16_t nb_bufs, uint8_t *split_flags)
+{
+ struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
+ struct rte_mbuf *start = rxq->pkt_first_seg;
+ struct rte_mbuf *end = rxq->pkt_last_seg;
+ unsigned pkt_idx, buf_idx;
+
+ for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+ if (end != NULL) {
+ /* processing a split packet */
+ end->next = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+ start->nb_segs++;
+ start->pkt_len += rx_bufs[buf_idx]->data_len;
+ end = end->next;
+
+ if (!split_flags[buf_idx]) {
+ /* it's the last packet of the set */
+ start->hash = end->hash;
+ start->ol_flags = end->ol_flags;
+ /* we need to strip crc for the whole packet */
+ start->pkt_len -= rxq->crc_len;
+ if (end->data_len > rxq->crc_len) {
+ end->data_len -= rxq->crc_len;
+ } else {
+ /* free up last mbuf */
+ struct rte_mbuf *secondlast = start;
+
+ while (secondlast->next != end)
+ secondlast = secondlast->next;
+ secondlast->data_len -= (rxq->crc_len -
+ end->data_len);
+ secondlast->next = NULL;
+ rte_pktmbuf_free_seg(end);
+ end = secondlast;
+ }
+ pkts[pkt_idx++] = start;
+ start = end = NULL;
+ }
+ } else {
+ /* not processing a split packet */
+ if (!split_flags[buf_idx]) {
+ /* not a split packet, save and skip */
+ pkts[pkt_idx++] = rx_bufs[buf_idx];
+ continue;
+ }
+ end = start = rx_bufs[buf_idx];
+ rx_bufs[buf_idx]->data_len += rxq->crc_len;
+ rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+ }
+ }
+
+ /* save the partial packet for next time */
+ rxq->pkt_first_seg = start;
+ rxq->pkt_last_seg = end;
+ memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+ return pkt_idx;
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+ struct i40e_tx_entry *txep;
+ uint32_t n;
+ uint32_t i;
+ int nb_free = 0;
+ struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+ /* check DD bits on threshold descriptor */
+ if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+ rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+ rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+ return 0;
+
+ n = txq->tx_rs_thresh;
+
+ /* first buffer to free from S/W ring is at index
+ * tx_next_dd - (tx_rs_thresh-1)
+ */
+ txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+ m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool)) {
+ free[nb_free++] = m;
+ } else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void *)free,
+ nb_free);
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+ } else {
+ for (i = 1; i < n; i++) {
+ m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ /* buffers were freed, update counters */
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+ txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+ if (txq->tx_next_dd >= txq->nb_tx_desc)
+ txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ return txq->tx_rs_thresh;
+}
+
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+ struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i;
+
+ for (i = 0; i < (int)nb_pkts; ++i)
+ txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ const unsigned mask = rxq->nb_rx_desc - 1;
+ unsigned i;
+
+ if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+ return;
+
+ /* free all mbufs that are valid in the ring */
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ }
+
+ rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+ /* set all entries to NULL */
+ memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline int
+i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer = *(uint64_t *)p;
+ return 0;
+}
+
+static inline int
+i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+ /* without Rx ol_flags, no VP flag report */
+ if (rxmode->hw_vlan_strip != 0 ||
+ rxmode->hw_vlan_extend != 0 ||
+ rxmode->hw_ip_checksum != 0)
+ return -1;
+#endif
+
+ /* no fdir support */
+ if (fconf->mode != RTE_FDIR_MODE_NONE)
+ return -1;
+
+ /* - no csum error report support
+ * - no header split support
+ */
+ if (rxmode->header_split == 1)
+ return -1;
+
+ return 0;
+#else
+ RTE_SET_USED(dev);
+ return -1;
+#endif
+}
+#endif
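i40e_rx_vec_dev_conf_condition_check_default() gates the vector Rx path on the device configuration: no header split, flow director disabled, and (when RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE is off) no VLAN or IP checksum offload. A sketch of a port configuration that passes these checks, showing only the members the check inspects (assumption: remaining fields keep their zero defaults):

#include <rte_ethdev.h>

/* Minimal rte_eth_conf that satisfies the vector-Rx condition check
 * factored out above.
 */
static const struct rte_eth_conf vec_rx_port_conf = {
	.rxmode = {
		.header_split   = 0,
		.hw_ip_checksum = 0,
		.hw_vlan_strip  = 0,
		.hw_vlan_extend = 0,
	},
	.fdir_conf = {
		.mode = RTE_FDIR_MODE_NONE,
	},
};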
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
new file mode 100644
index 00000000..011c54e0
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -0,0 +1,614 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+ uint64x2_t dma_addr0, dma_addr1;
+ uint64x2_t zero = vdupq_n_u64(0);
+ uint64_t paddr;
+ uint8x8_t p;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (unlikely(rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0)) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vst1q_u64((uint64_t *)&rxdp[i].read, zero);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+		/* Flush the mbuf with the packet template.
+		 * Only 6 bytes of rearm_data actually need rearming, but Rx
+		 * processing overwrites ol_flags (which comes next) anyway,
+		 * so write the whole 8 bytes with one store: 6 bytes of
+		 * rearm_data plus the first 2 bytes of ol_flags.
+		 */
+ vst1_u8((uint8_t *)&mb0->rearm_data, p);
+ paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr0 = vdupq_n_u64(paddr);
+
+ /* flush desc with pa dma_addr */
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+ vst1_u8((uint8_t *)&mb1->rearm_data, p);
+ paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+ dma_addr1 = vdupq_n_u64(paddr);
+ vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when they are not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet
+ */
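+/* Build-time control: this is normally driven by the
+ * CONFIG_RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE option in the DPDK configuration.
+ */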
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+ uint32x4_t vlan0, vlan1, rss, l3_l4e;
+
+	/* mask everything except the RSS, flow director, VLAN and L3/L4
+	 * checksum error flags.
+	 * bit2 is for VLAN tag, bit11 for flow director indication,
+	 * bit13:12 for RSS indication, bit24:22 for checksum errors.
+	 */
+ const uint32x4_t rss_vlan_msk = {
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const uint8x16_t vlan_flags = {
+ 0, 0, 0, 0,
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const uint8x16_t rss_flags = {
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const uint8x16_t l3_l4e_flags = {
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
+ vreinterpretq_u32_u64(descs[2])).val[1];
+ vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
+ vreinterpretq_u32_u64(descs[3])).val[1];
+ vlan0 = vzipq_u32(vlan0, vlan1).val[0];
+
+ vlan1 = vandq_u32(vlan0, rss_vlan_msk);
+ vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
+ vreinterpretq_u8_u32(vlan1)));
+
+ rss = vshrq_n_u32(vlan1, 11);
+ rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
+ vreinterpretq_u8_u32(rss)));
+
+ l3_l4e = vshrq_n_u32(vlan1, 22);
+ l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
+ vreinterpretq_u8_u32(l3_l4e)));
+
+
+ vlan0 = vorrq_u32(vlan0, rss);
+ vlan0 = vorrq_u32(vlan0, l3_l4e);
+
+ rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
+ rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
+ rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
+ rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
+}
+#else
+#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT 10
+
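+/* Mask that keeps only the DD bit (bit 0) of each of the four 16-bit status
+ * words packed into 'stat' in the receive loop below; a popcount of the
+ * masked value yields the number of completed descriptors in a group of 4.
+ */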
+#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
+
+static inline void
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+ int i;
+ uint8_t ptype;
+ uint8x16_t tmp;
+
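+	/* The 8-bit packet type sits at bits 37:30 of the descriptor's
+	 * second quadword; shifting each 64-bit lane right by 30 moves it
+	 * into the low byte of the upper lane (vector byte 8).
+	 */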
+	for (i = 0; i < 4; i++) {
+		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
+		ptype = vgetq_lane_u8(tmp, 8);
+		rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+	}
+}
+
+ /*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only the DD bits of the first
+ *   RTE_I40E_VPMD_RX_BURST descriptors are scanned
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+
+ /* mask to shuffle from desc. to mbuf */
+ uint8x16_t shuf_msk = {
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ uint8x16_t eop_check = {
+ 0x02, 0x00, 0x02, 0x00,
+ 0x02, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+ };
+
+ uint16x8_t crc_adjust = {
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+
+	/* nb_pkts shall be no greater than RTE_I40E_MAX_RX_BURST */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+	/* nb_pkts is rounded down to a multiple of RTE_I40E_DESCS_PER_LOOP */
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch_non_temporal(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+	/* A. load 4 packet descriptors in one loop
+	 * [A*. mask out 4 unused dirty fields in the descriptors]
+	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info from the descriptors into the mbufs
+	 */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
+ uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ uint16x8x2_t sterr_tmp1, sterr_tmp2;
+ uint64x2_t mbp1, mbp2;
+ uint16x8_t staterr;
+ uint16x8_t tmp;
+ uint64_t stat;
+
+ int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
+
+		/* B.1 load 2 mbuf pointers */
+ mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_rmb();
+
+		/* B.2 copy 2 mbuf pointers into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+		/* B.1 load 2 more mbuf pointers */
+		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+		/* A.1 load the remaining pkts desc */
+		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[0] = vld1q_u64((uint64_t *)(rxdp));
+
+		/* B.2 copy 2 mbuf pointers into rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
+ uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
+ len_shl);
+ descs[3] = vreinterpretq_u64_u32(len3);
+ uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
+ len_shl);
+ descs[2] = vreinterpretq_u64_u32(len2);
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+ pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
+ vreinterpretq_u16_u64(descs[3]));
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
+ vreinterpretq_u16_u64(descs[2]));
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = vzipq_u16(sterr_tmp1.val[1],
+ sterr_tmp2.val[1]).val[0];
+ stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+ pkt_mb4 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+ pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
+ uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
+ len_shl);
+ descs[1] = vreinterpretq_u64_u32(len1);
+ uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
+ len_shl);
+ descs[0] = vreinterpretq_u64_u32(len0);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+ pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+ pkt_mb4);
+ vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+ pkt_mb3);
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+ pkt_mb2 = vreinterpretq_u8_u16(tmp);
+ tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+ pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ uint8x16_t eop_shuf_mask = {
+ 0x00, 0x02, 0x04, 0x06,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF};
+ uint8x16_t eop_bits;
+
+ /* and with mask to extract bits, flipping 1-0 */
+ eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
+ eop_bits = vandq_u8(eop_bits, eop_check);
+			/* The staterr values are not in order; that does not
+			 * matter when counting the DD bits, but it does for
+			 * end-of-packet tracking, so shuffle them back into
+			 * order. This also compresses the 16-bit values
+			 * down to 8 bits.
+			 */
+ eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);
+
+ /* store the resulting 32-bit value */
+ vst1q_lane_u32((uint32_t *)split_packet,
+ vreinterpretq_u32_u8(eop_bits), 0);
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+ pkt_mb2);
+ vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+ pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
+		/* C.4 calc available number of desc */
+ var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /*
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only the DD bits of the first
+ *   RTE_I40E_VPMD_RX_BURST descriptors are scanned
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
+ * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only the DD bits of the first
+ *   RTE_I40E_VPMD_RX_BURST descriptors are scanned
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+ unsigned i = 0;
+
+ if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble from there */
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
+ vst1q_u64((uint64_t *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+	/* crossing the tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
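+	/* 'n' is the number of slots left before the end of the ring; if the
+	 * commit crosses the ring boundary, fill up to the end first (setting
+	 * the report-status bit on the last descriptor) and wrap tx_id back
+	 * to 0 for the remainder.
+	 */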
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
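
For reference, the three table lookups performed by desc_to_olflags_v() above (and by its SSE counterpart in the next file) amount to the scalar logic sketched below, using the same mbuf flag macros and the bit layout implied by the 0x1c03804 mask; the function name is illustrative only.

#include <stdint.h>
#include <rte_mbuf.h>

static inline uint64_t
rx_status_to_olflags_scalar(uint32_t stat_err)
{
	/* index = bit 2 of the status word: VLAN tag stripped */
	static const uint64_t vlan_tbl[8] = {
		[4] = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED,
	};
	/* index = bits 13:11: flow director match / RSS hash present */
	static const uint64_t rss_tbl[8] = {
		[1] = PKT_RX_FDIR,
		[6] = PKT_RX_RSS_HASH,
		[7] = PKT_RX_RSS_HASH | PKT_RX_FDIR,
	};
	/* index = bits 24:22: L3/L4/outer-IP checksum errors */
	static const uint64_t l3_l4_tbl[8] = {
		[1] = PKT_RX_IP_CKSUM_BAD,
		[2] = PKT_RX_L4_CKSUM_BAD,
		[3] = PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
		[4] = PKT_RX_EIP_CKSUM_BAD,
		[5] = PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
		[6] = PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
		[7] = PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
		      PKT_RX_IP_CKSUM_BAD,
	};
	uint32_t v = stat_err & 0x1c03804;

	return vlan_tbl[v & 0x7] |
	       rss_tbl[(v >> 11) & 0x7] |
	       l3_l4_tbl[(v >> 22) & 0x7];
}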
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index a9649d35..7c84a41a 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -39,6 +39,7 @@
#include "base/i40e_type.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
#include <tmmintrin.h>
@@ -138,19 +139,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
static inline void
desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
{
- __m128i vlan0, vlan1, rss;
- union {
- uint16_t e[4];
- uint64_t dword;
- } vol;
+ __m128i vlan0, vlan1, rss, l3_l4e;
/* mask everything except RSS, flow director and VLAN flags
* bit2 is for VLAN tag, bit11 for flow director indication
* bit13:12 for RSS indication.
*/
- const __m128i rss_vlan_msk = _mm_set_epi16(
- 0x0000, 0x0000, 0x0000, 0x0000,
- 0x3804, 0x3804, 0x3804, 0x3804);
+ const __m128i rss_vlan_msk = _mm_set_epi32(
+ 0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
@@ -163,23 +159,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
0, 0, PKT_RX_FDIR, 0);
- vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
- vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
- vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+ const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD,
+ 0);
+
+ vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+ vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+ vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
- rss = _mm_srli_epi16(vlan1, 11);
+ rss = _mm_srli_epi32(vlan1, 11);
rss = _mm_shuffle_epi8(rss_flags, rss);
+ l3_l4e = _mm_srli_epi32(vlan1, 22);
+ l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+
vlan0 = _mm_or_si128(vlan0, rss);
- vol.dword = _mm_cvtsi128_si64(vlan0);
+ vlan0 = _mm_or_si128(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = vol.e[0];
- rx_pkts[1]->ol_flags = vol.e[1];
- rx_pkts[2]->ol_flags = vol.e[2];
- rx_pkts[3]->ol_flags = vol.e[3];
+ rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
+ rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
+ rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
+ rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
}
#else
#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
@@ -187,6 +196,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
#define PKTLEN_SHIFT 10
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+ __m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+ __m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+
+ ptype0 = _mm_srli_epi64(ptype0, 30);
+ ptype1 = _mm_srli_epi64(ptype1, 30);
+
+ rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
+ rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
+ rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
+ rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+}
+
/*
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -224,7 +248,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*/
rxdp = rxq->rx_ring + rxq->rx_tail;
- _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+ rte_prefetch0(rxdp);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
@@ -283,6 +307,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
+
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -395,6 +420,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
+ desc_to_ptype_v(descs, &rx_pkts[pos]);
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
@@ -423,68 +449,6 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-static inline uint16_t
-reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
- uint16_t nb_bufs, uint8_t *split_flags)
-{
- struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
- struct rte_mbuf *start = rxq->pkt_first_seg;
- struct rte_mbuf *end = rxq->pkt_last_seg;
- unsigned pkt_idx, buf_idx;
-
- for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
- if (end != NULL) {
- /* processing a split packet */
- end->next = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
- start->nb_segs++;
- start->pkt_len += rx_bufs[buf_idx]->data_len;
- end = end->next;
-
- if (!split_flags[buf_idx]) {
- /* it's the last packet of the set */
- start->hash = end->hash;
- start->ol_flags = end->ol_flags;
- /* we need to strip crc for the whole packet */
- start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len) {
- end->data_len -= rxq->crc_len;
- } else {
- /* free up last mbuf */
- struct rte_mbuf *secondlast = start;
-
- while (secondlast->next != end)
- secondlast = secondlast->next;
- secondlast->data_len -= (rxq->crc_len -
- end->data_len);
- secondlast->next = NULL;
- rte_pktmbuf_free_seg(end);
- end = secondlast;
- }
- pkts[pkt_idx++] = start;
- start = end = NULL;
- }
- } else {
- /* not processing a split packet */
- if (!split_flags[buf_idx]) {
- /* not a split packet, save and skip */
- pkts[pkt_idx++] = rx_bufs[buf_idx];
- continue;
- }
- end = start = rx_bufs[buf_idx];
- rx_bufs[buf_idx]->data_len += rxq->crc_len;
- rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
- }
- }
-
- /* save the partial packet for next time */
- rxq->pkt_first_seg = start;
- rxq->pkt_last_seg = end;
- memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
- return pkt_idx;
-}
-
/* vPMD receive routine that reassembles scattered packets
* Notice:
* - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -550,73 +514,6 @@ vtx(volatile struct i40e_tx_desc *txdp,
vtx1(txdp, *pkt, flags);
}
-static inline int __attribute__((always_inline))
-i40e_tx_free_bufs(struct i40e_tx_queue *txq)
-{
- struct i40e_tx_entry *txep;
- uint32_t n;
- uint32_t i;
- int nb_free = 0;
- struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
-
- /* check DD bits on threshold descriptor */
- if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
- rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
- rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
- return 0;
-
- n = txq->tx_rs_thresh;
-
- /* first buffer to free from S/W ring is at index
- * tx_next_dd - (tx_rs_thresh-1)
- */
- txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool)) {
- free[nb_free++] = m;
- } else {
- rte_mempool_put_bulk(free[0]->pool,
- (void *)free,
- nb_free);
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
- } else {
- for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- /* buffers were freed, update counters */
- txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
- txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
- if (txq->tx_next_dd >= txq->nb_tx_desc)
- txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
- return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct i40e_tx_entry *txep,
- struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- int i;
-
- for (i = 0; i < (int)nb_pkts; ++i)
- txep[i].mbuf = tx_pkts[i];
-}
-
uint16_t
i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -687,49 +584,13 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
void __attribute__((cold))
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
- const unsigned mask = rxq->nb_rx_desc - 1;
- unsigned i;
-
- if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
- return;
-
- /* free all mbufs that are valid in the ring */
- if (rxq->rxrearm_nb == 0) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_ring[i].mbuf != NULL)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
- }
- } else {
- for (i = rxq->rx_tail;
- i != rxq->rxrearm_start;
- i = (i + 1) & mask) {
- if (rxq->sw_ring[i].mbuf != NULL)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
- }
- }
-
- rxq->rxrearm_nb = rxq->nb_rx_desc;
-
- /* set all entries to NULL */
- memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+ _i40e_rx_queue_release_mbufs_vec(rxq);
}
int __attribute__((cold))
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
- uintptr_t p;
- struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
- mb_def.nb_segs = 1;
- mb_def.data_off = RTE_PKTMBUF_HEADROOM;
- mb_def.port = rxq->port_id;
- rte_mbuf_refcnt_set(&mb_def, 1);
-
- /* prevent compiler reordering: rearm_data covers previous fields */
- rte_compiler_barrier();
- p = (uintptr_t)&mb_def.rearm_data;
- rxq->mbuf_initializer = *(uint64_t *)p;
- return 0;
+ return i40e_rxq_vec_setup_default(rxq);
}
int __attribute__((cold))
@@ -742,34 +603,10 @@ int __attribute__((cold))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
- struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
/* need SSE4.1 support */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
return -1;
-
-#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
#endif
- /* no fdir support */
- if (fconf->mode != RTE_FDIR_MODE_NONE)
- return -1;
-
- /* - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
- return -1;
-
- return 0;
-#else
- RTE_SET_USED(dev);
- return -1;
-#endif
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
}
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index a6c71f34..94ddc7b8 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -119,6 +119,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
# this lib depends upon:
DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 76e78051..6b54c31e 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -34,7 +34,7 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2016.04.12 released by ND. The sub-directory of base/
+cid-10g-shared-code.2016.08.15 released by ND. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index db808801..724dcbbc 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -995,19 +995,19 @@ STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
* @vlan: VLAN id to write to VLAN filter
* @vind: VMDq output index that maps queue to VLAN id in VFTA
* @vlan_on: boolean flag to turn on/off VLAN in VFTA
- * @bypass_vlvf: boolean flag - unused
+ * @vlvf_bypass: boolean flag - unused
*
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on, bool bypass_vlvf)
+ bool vlan_on, bool vlvf_bypass)
{
u32 regindex;
u32 bitindex;
u32 bits;
u32 vftabyte;
- UNREFERENCED_1PARAMETER(bypass_vlvf);
+ UNREFERENCED_1PARAMETER(vlvf_bypass);
DEBUGFUNC("ixgbe_set_vfta_82598");
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 5bc7c2b9..832242ee 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -1178,6 +1178,7 @@ mac_reset_top:
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
@@ -1809,14 +1810,23 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
}
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m);
- /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSIP4M and
- * FDIRDIP4M in cloud mode to allow L3/L3 packets to
- * tunnel.
+ /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM,
+ * FDIRSIP4M and FDIRDIP4M in cloud mode to allow
+ * L3/L3 packets to tunnel.
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF);
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF);
+ break;
+ default:
+ break;
+ }
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 17868676..094ee526 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -106,8 +106,10 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
status = ixgbe_init_ops_X550(hw);
break;
case ixgbe_mac_X550EM_x:
+ status = ixgbe_init_ops_X550EM_x(hw);
+ break;
case ixgbe_mac_X550EM_a:
- status = ixgbe_init_ops_X550EM(hw);
+ status = ixgbe_init_ops_X550EM_a(hw);
break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
@@ -1090,7 +1092,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
bool vlvf_bypass)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
- vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED);
+ vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED);
}
/**
@@ -1100,7 +1102,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
* @vind: VMDq output index that maps queue to VLAN id in VLVFB
* @vlan_on: boolean flag to turn on/off VLAN in VLVF
* @vfta_delta: pointer to the difference between the current value of VFTA
- * and the desired value
+ * and the desired value
* @vfta: the desired value of the VFTA
* @vlvf_bypass: boolean flag indicating updating the default pool is okay
*
@@ -1110,7 +1112,7 @@ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
u32 *vfta_delta, u32 vfta, bool vlvf_bypass)
{
return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind,
- vlan_on, vfta_delta, vfta, vlvf_bypass),
+ vlan_on, vfta_delta, vfta, vlvf_bypass),
IXGBE_NOT_IMPLEMENTED);
}
@@ -1659,6 +1661,7 @@ void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw)
hw->mac.ops.init_swfw_sync(hw);
}
+
void ixgbe_disable_rx(struct ixgbe_hw *hw)
{
if (hw->mac.ops.disable_rx)
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index 3aad1da7..24c4ae8d 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -45,6 +45,8 @@ extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index 1c5cb913..cca19efc 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -168,13 +168,24 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
- hw->mac.ops.check_link(hw, &speed, &link_up, false);
- /* if link is down, assume supported */
- if (link_up)
- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ /* flow control autoneg black list */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ case IXGBE_DEV_ID_X550EM_A_QSFP:
+ case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ supported = false;
+ break;
+ default:
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
true : false;
- else
- supported = true;
+ else
+ supported = true;
+ }
+
break;
case ixgbe_media_type_backplane:
supported = true;
@@ -188,6 +199,9 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550T:
case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
supported = true;
break;
default:
@@ -197,9 +211,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
break;
}
- ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
- "Device %x does not support flow control autoneg",
- hw->device_id);
+ if (!supported)
+ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
+ "Device %x does not support flow control autoneg",
+ hw->device_id);
return supported;
}
@@ -371,6 +386,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 ctrl_ext;
+ u16 device_caps;
DEBUGFUNC("ixgbe_start_hw_generic");
@@ -393,14 +409,29 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
ret_val = ixgbe_setup_fc(hw);
- if (ret_val != IXGBE_SUCCESS)
- goto out;
+ if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED)
+ return ret_val;
+
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ hw->need_crosstalk_fix = false;
+ else
+ hw->need_crosstalk_fix = true;
+ break;
+ default:
+ hw->need_crosstalk_fix = false;
+ break;
+ }
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
-out:
- return ret_val;
+ return IXGBE_SUCCESS;
}
/**
@@ -1046,7 +1077,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
- IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
}
}
@@ -1115,6 +1146,9 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
DEBUGFUNC("ixgbe_led_on_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
@@ -1135,6 +1169,9 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
DEBUGFUNC("ixgbe_led_off_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
@@ -2851,7 +2888,7 @@ out:
* advertised settings
**/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
{
if ((!(adv_reg)) || (!(lp_reg))) {
ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
@@ -3323,7 +3360,7 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
**/
s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
{
- int secrxreg;
+ u32 secrxreg;
DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
@@ -3370,6 +3407,9 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
DEBUGFUNC("ixgbe_blink_led_start_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/*
* Link must be up to auto-blink the LEDs;
* Force it if link is down.
@@ -3415,6 +3455,10 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
DEBUGFUNC("ixgbe_blink_led_stop_generic");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
+
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val != IXGBE_SUCCESS)
goto out;
@@ -3887,7 +3931,8 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
vfta_delta = 1 << (vlan % 32);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
- /* vfta_delta represents the difference between the current value
+ /*
+ * vfta_delta represents the difference between the current value
* of vfta and the value we want in the register. Since the diff
* is an XOR mask we can just update the vfta using an XOR
*/
@@ -3920,7 +3965,7 @@ vfta_update:
* @vind: VMDq output index that maps queue to VLAN id in VLVFB
* @vlan_on: boolean flag to turn on/off VLAN in VLVF
* @vfta_delta: pointer to the difference between the current value of VFTA
- * and the desired value
+ * and the desired value
* @vfta: the desired value of the VFTA
* @vlvf_bypass: boolean flag indicating updating default pool is okay
*
@@ -3947,6 +3992,7 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
*/
if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
return IXGBE_SUCCESS;
+
vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
if (vlvf_index < 0)
return vlvf_index;
@@ -3976,6 +4022,7 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
return IXGBE_SUCCESS;
}
+
/* If there are still bits set in the VLVFB registers
* for the VLAN ID indicated we need to see if the
* caller is requesting that we clear the VFTA entry bit.
@@ -4025,6 +4072,32 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_need_crosstalk_fix - Determine whether the crosstalk fix is needed
+ * @hw: pointer to hardware structure
+ *
+ * Contains the logic to identify if we need to verify link for the
+ * crosstalk fix
+ **/
+static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
+{
+
+ /* Does FW say we need the fix */
+ if (!hw->need_crosstalk_fix)
+ return false;
+
+ /* Only consider SFP+ PHYs i.e. media type fiber */
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ixgbe_check_mac_link_generic - Determine link and speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -4041,6 +4114,35 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
DEBUGFUNC("ixgbe_check_mac_link_generic");
+	/* If the crosstalk fix is enabled, first sanity-check that the
+	 * SFP+ cage is actually occupied.
+	 */
+ if (ixgbe_need_crosstalk_fix(hw)) {
+ u32 sfp_cage_full;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP0;
+ break;
+ default:
+ /* sanity check - No SFP+ devices here */
+ sfp_cage_full = false;
+ break;
+ }
+
+ if (!sfp_cage_full) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return IXGBE_SUCCESS;
+ }
+ }
+
/* clear the old state */
links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -4087,6 +4189,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ }
+ break;
default:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index cd042375..66dd5659 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -133,7 +133,7 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on, bool vlvf_bypass);
+ u32 vind, bool vlan_on, bool vlvf_bypass);
s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, u32 *vfta_delta, u32 vfta,
bool vlvf_bypass);
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index d775142d..7556a818 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -90,6 +90,7 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
+ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -109,9 +110,9 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
/* mailbox API, version 1.2 VF requests */
-#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
-#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
-#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C
+#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */
+#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
+#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 06d1ee1c..77f0af51 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -100,6 +100,7 @@ enum {
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
+#define IXGBE_BE32_TO_CPU(_i) rte_be_to_cpu_32(_i)
typedef uint8_t u8;
typedef int8_t s8;
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index ed1b14f3..43c55d74 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -283,6 +283,39 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_probe_phy - Probe a single address for a PHY
+ * @hw: pointer to hardware structure
+ * @phy_addr: PHY address to probe
+ *
+ * Returns true if PHY found
+ */
+static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
+{
+ u16 ext_ability = 0;
+
+ if (!ixgbe_validate_phy_addr(hw, phy_addr))
+ return false;
+
+ if (ixgbe_get_phy_id(hw))
+ return false;
+
+ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id);
+
+ if (hw->phy.type == ixgbe_phy_unknown) {
+ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+ if (ext_ability &
+ (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+ IXGBE_MDIO_PHY_1000BASET_ABILITY))
+ hw->phy.type = ixgbe_phy_cu_unknown;
+ else
+ hw->phy.type = ixgbe_phy_generic;
+ }
+
+ return true;
+}
+
+/**
* ixgbe_identify_phy_generic - Get physical layer module
* @hw: pointer to hardware structure
*
@@ -291,8 +324,7 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
- u32 phy_addr;
- u16 ext_ability = 0;
+ u16 phy_addr;
DEBUGFUNC("ixgbe_identify_phy_generic");
@@ -303,45 +335,33 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
}
- if (hw->phy.type == ixgbe_phy_unknown) {
- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
- if (ixgbe_validate_phy_addr(hw, phy_addr)) {
- hw->phy.addr = phy_addr;
- ixgbe_get_phy_id(hw);
- hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
-
- if (hw->phy.type == ixgbe_phy_unknown) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PHY_EXT_ABILITY,
- IXGBE_MDIO_PMA_PMD_DEV_TYPE,
- &ext_ability);
- if (ext_ability &
- (IXGBE_MDIO_PHY_10GBASET_ABILITY |
- IXGBE_MDIO_PHY_1000BASET_ABILITY))
- hw->phy.type =
- ixgbe_phy_cu_unknown;
- else
- hw->phy.type =
- ixgbe_phy_generic;
- }
+ if (hw->phy.type != ixgbe_phy_unknown)
+ return IXGBE_SUCCESS;
- status = IXGBE_SUCCESS;
- break;
- }
- }
+ if (hw->phy.nw_mng_if_sel) {
+ phy_addr = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ if (ixgbe_probe_phy(hw, phy_addr))
+ return IXGBE_SUCCESS;
+ else
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ }
- /* Certain media types do not have a phy so an address will not
- * be found and the code will take this path. Caller has to
- * decide if it is an error or not.
- */
- if (status != IXGBE_SUCCESS) {
- hw->phy.addr = 0;
+ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+ if (ixgbe_probe_phy(hw, phy_addr)) {
+ status = IXGBE_SUCCESS;
+ break;
}
- } else {
- status = IXGBE_SUCCESS;
}
+ /* Certain media types do not have a phy so an address will not
+ * be found and the code will take this path. Caller has to
+ * decide if it is an error or not.
+ */
+ if (status != IXGBE_SUCCESS)
+ hw->phy.addr = 0;
+
return status;
}
@@ -452,9 +472,11 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
phy_type = ixgbe_phy_nl;
break;
case X557_PHY_ID:
+ case X557_PHY_ID2:
phy_type = ixgbe_phy_x550em_ext_t;
break;
case IXGBE_M88E1500_E_PHY_ID:
+ case IXGBE_M88E1543_E_PHY_ID:
phy_type = ixgbe_phy_m88;
break;
default:
@@ -719,7 +741,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
DEBUGFUNC("ixgbe_write_phy_reg_generic");
if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) {
- status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
} else {
@@ -882,6 +904,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_100_FULL)
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+ if (speed & IXGBE_LINK_SPEED_10_FULL)
+ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL;
+
/* Setup link based on the new speed settings */
ixgbe_setup_phy_link(hw);
@@ -919,6 +944,7 @@ static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw)
hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL;
break;
default:
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index 281f9faf..da14abcd 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -92,8 +92,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
-#define IXGBE_CS4223_PHY_ID 0x7003/* Quad port */
-#define IXGBE_CS4227_PHY_ID 0x3003/* Dual port */
+#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -154,6 +154,73 @@ POSSIBILITY OF SUCH DAMAGE.
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
+/* More phy definitions */
+#define IXGBE_M88E1500_COPPER_CTRL 0 /* Page 0 reg */
+#define IXGBE_M88E1500_COPPER_CTRL_RESET (1u << 15)
+#define IXGBE_M88E1500_COPPER_CTRL_AN_EN (1u << 12)
+#define IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN (1u << 11)
+#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN (1u << 9)
+#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX (1u << 8)
+#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB (1u << 6)
+#define IXGBE_M88E1500_COPPER_STATUS 1 /* Page 0 reg */
+#define IXGBE_M88E1500_COPPER_STATUS_AN_DONE (1u << 5)
+#define IXGBE_M88E1500_COPPER_AN 4 /* Page 0 reg */
+#define IXGBE_M88E1500_COPPER_AN_AS_PAUSE (1u << 11)
+#define IXGBE_M88E1500_COPPER_AN_PAUSE (1u << 10)
+#define IXGBE_M88E1500_COPPER_AN_T4 (1u << 9)
+#define IXGBE_M88E1500_COPPER_AN_100TX_FD (1u << 8)
+#define IXGBE_M88E1500_COPPER_AN_100TX_HD (1u << 7)
+#define IXGBE_M88E1500_COPPER_AN_10TX_FD (1u << 6)
+#define IXGBE_M88E1500_COPPER_AN_10TX_HD (1u << 5)
+#define IXGBE_M88E1500_COPPER_AN_LP_ABILITY 5 /* Page 0 reg */
+#define IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE (1u << 11)
+#define IXGBE_M88E1500_COPPER_AN_LP_PAUSE (1u << 10)
+#define IXGBE_M88E1500_1000T_CTRL 9 /* Page 0 reg */
+/* 1=Configure PHY as Master 0=Configure PHY as Slave */
+#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE (1u << 11)
+#define IXGBE_M88E1500_1000T_CTRL_1G_FD (1u << 9)
+/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
+#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE (1u << 12)
+#define IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX (1u << 9)
+#define IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX (1u << 8)
+#define IXGBE_M88E1500_1000T_STATUS 10 /* Page 0 reg */
+#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
+#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
+#define IXGBE_M88E1500_STATUS_LINK (1u << 2) /* Interface Link Bit */
+#define IXGBE_M88E1500_MAC_CTRL_1 16 /* Page 0 reg */
+#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
+#define IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT 12
+#define IXGBE_M88E1500_MAC_CTRL_1_DWN_4X 3u
+#define IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT 8
+#define IXGBE_M88E1500_MAC_CTRL_1_ED_TM 3u
+#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT 5
+#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO 3u
+#define IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN (1u << 2)
+#define IXGBE_M88E1500_PHY_SPEC_STATUS 17 /* Page 0 reg */
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_SHIFT 14
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_MASK 3u
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_10 0u
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_100 1u
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_1000 2u
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_DUPLEX (1u << 13)
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_RESOLVED (1u << 11)
+#define IXGBE_M88E1500_PHY_SPEC_STATUS_LINK (1u << 10)
+#define IXGBE_M88E1500_PAGE_ADDR 22 /* All pages reg */
+#define IXGBE_M88E1500_FIBER_CTRL 0 /* Page 1 reg */
+#define IXGBE_M88E1500_FIBER_CTRL_RESET (1u << 15)
+#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB (1u << 13)
+#define IXGBE_M88E1500_FIBER_CTRL_AN_EN (1u << 12)
+#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN (1u << 11)
+#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL (1u << 8)
+#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB (1u << 6)
+#define IXGBE_M88E1500_MAC_SPEC_CTRL 16 /* Page 2 reg */
+#define IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN (1u << 3)
+#define IXGBE_M88E1500_EEE_CTRL_1 0 /* Page 18 reg */
+#define IXGBE_M88E1500_EEE_CTRL_1_MS (1u << 0) /* EEE Master/Slave */
+#define IXGBE_M88E1500_GEN_CTRL 20 /* Page 18 reg */
+#define IXGBE_M88E1500_GEN_CTRL_RESET (1u << 15)
+#define IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER 1u /* Mode bits 0-2 */
+
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 83818a96..4982e035 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -105,11 +105,11 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
-#define IXGBE_SUBDEV_ID_82599_SFP_LOM 0x06EE
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
@@ -565,6 +565,13 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */
#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */
+/* masks for accessing VXLAN and GENEVE UDP ports */
+#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */
+#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */
+
+#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16
+
#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
/* Ext Flexible Host Filter Table */
#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100))
@@ -1644,13 +1651,14 @@ struct ixgbe_dmac_config {
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
+#define X557_PHY_ID2 0x01540250
#define AQ_FW_REV 0x20
#define QT2022_PHY_ID 0x0043A400
#define ATH_PHY_ID 0x03429050
/* PHY Types */
-#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
-#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
+#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0
+#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0
/* Special PHY Init Routine */
#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
@@ -1765,6 +1773,8 @@ enum {
#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
/* VMOLR bitmasks */
+#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */
+#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */
#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */
#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */
#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */
@@ -2203,6 +2213,7 @@ enum {
#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000
#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */
#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */
@@ -2335,7 +2346,9 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
#define IXGBE_DEVICE_CAPS 0x2C
-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2780,6 +2793,7 @@ enum {
#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */
#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */
#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */
#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */
#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */
#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */
@@ -3054,7 +3068,7 @@ enum ixgbe_fdir_pballoc_type {
#ifdef C99
#pragma pack(push, 1)
#else
-#pragma pack(1)
+#pragma pack (1)
#endif /* C99 */
struct ixgbe_hic_hdr {
@@ -3136,13 +3150,13 @@ struct ixgbe_hic_internal_phy_req {
u8 command_type;
__be16 address;
u16 rsv1;
- __le32 write_data;
+ __be32 write_data;
u16 pad;
};
struct ixgbe_hic_internal_phy_resp {
struct ixgbe_hic_hdr hdr;
- __le32 read_data;
+ __be32 read_data;
};
#ifdef C99
@@ -3305,7 +3319,7 @@ typedef u32 ixgbe_autoneg_advertised;
/* Link speed */
typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_UNKNOWN 0
-#define IXGBE_LINK_SPEED_10_FULL 0x0004
+#define IXGBE_LINK_SPEED_10_FULL 0x0002
#define IXGBE_LINK_SPEED_100_FULL 0x0008
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400
@@ -3840,6 +3854,7 @@ struct ixgbe_mac_operations {
void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -3883,6 +3898,8 @@ struct ixgbe_mac_operations {
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
+ s32 (*set_rlpml)(struct ixgbe_hw *, u16);
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
@@ -4078,6 +4095,7 @@ struct ixgbe_hw {
bool force_full_reset;
bool allow_unsupported_sfp;
bool wol_enabled;
+ bool need_crosstalk_fix;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4136,16 +4154,35 @@ struct ixgbe_hw {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
+#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28)
+#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31)
+
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
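Note: the new KRM_PMD_FLX_MASK_ST20 bits expose a flex-port control word: a 3-bit speed field in bits 28-30, enables for SGMII, clause-37 AN and KR AN, and an FW AN-restart flag in bit 31. A small sketch of how the SGMII setup path later in this patch drives that field, with only the bit manipulation shown (read_iosf_sb_reg/write_iosf_sb_reg are assumed around it):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the ST20 field layout defined above. */
#define ST20_SGMII_EN    (1u << 25)
#define ST20_AN37_EN     (1u << 26)
#define ST20_AN_EN       (1u << 27)
#define ST20_SPEED_1G    (0x2u << 28)
#define ST20_SPEED_AN    (0x4u << 28)
#define ST20_SPEED_MASK  (0x7u << 28)

/* Select 1G SGMII with clause-37 AN, as the SGMII path does. */
static uint32_t st20_select_sgmii_1g(uint32_t st20)
{
    st20 &= ~ST20_SPEED_MASK;
    st20 |= ST20_SPEED_1G;
    st20 &= ~ST20_AN_EN;
    st20 |= ST20_SGMII_EN | ST20_AN37_EN;
    return st20;
}

int main(void)
{
    uint32_t reg = ST20_SPEED_AN | ST20_AN_EN;  /* pretend previous KR/AN mode */

    printf("before 0x%08x after 0x%08x\n",
           (unsigned)reg, (unsigned)st20_select_sgmii_1g(reg));
    return 0;
}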
@@ -4166,9 +4203,14 @@ struct ixgbe_hw {
#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28)
#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29)
-
+#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1)
+#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 << 2)
+#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2)
+#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3)
+#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29)
#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0)
#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1)
+
#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11)
@@ -4207,11 +4249,18 @@ struct ixgbe_hw {
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_NW_MNG_IF_SEL 0x00011178
-#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1 << 1)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2)
+#define IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20)
+#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21)
#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
- (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index 26c0d81c..e9c13f23 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -64,6 +64,7 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
hw->mac.ops.get_bus_info = NULL;
+ hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version;
/* Link */
hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
@@ -75,10 +76,12 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
hw->mac.ops.init_rx_addrs = NULL;
hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode;
hw->mac.ops.enable_mc = NULL;
hw->mac.ops.disable_mc = NULL;
hw->mac.ops.clear_vfta = NULL;
hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf;
hw->mac.max_tx_queues = 1;
hw->mac.max_rx_queues = 1;
@@ -322,15 +325,16 @@ STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
return vector;
}
-STATIC void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
- u32 *msg, u16 size)
+STATIC s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
+ u32 *retmsg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 retmsg[IXGBE_VFMAILBOX_SIZE];
s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
- if (!retval)
- mbx->ops.read_posted(hw, retmsg, size, 0);
+ if (retval)
+ return retval;
+
+ return mbx->ops.read_posted(hw, retmsg, size, 0);
}
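Note: ixgbevf_write_msg_read_ack() now returns the mailbox status and hands the PF's reply back through a caller-supplied buffer instead of a hidden local one, which is what lets the callers below reuse msgbuf for both request and reply. A toy, self-contained model of that request/ack round trip; the framing constants are loosely modelled on ixgbe_mbx.h and the PF side is simulated:

#include <stdint.h>
#include <stdio.h>

#define VT_MSGTYPE_ACK  0x80000000u
#define VT_MSGTYPE_NACK 0x40000000u
#define VT_MSGTYPE_CTS  0x20000000u
#define VF_SET_MAC_ADDR 0x02u

/* Toy transport: the "PF" just ACKs whatever opcode it receives. */
static int write_msg_read_ack(uint32_t *msg, uint32_t *retmsg, unsigned size)
{
    (void)size;
    retmsg[0] = msg[0] | VT_MSGTYPE_ACK | VT_MSGTYPE_CTS;
    return 0;
}

int main(void)
{
    uint32_t msgbuf[3] = { VF_SET_MAC_ADDR, 0, 0 };
    int err = write_msg_read_ack(msgbuf, msgbuf, 3);

    if (err) {
        printf("mailbox transport error %d\n", err);
        return 1;
    }
    msgbuf[0] &= ~VT_MSGTYPE_CTS;          /* CTS bit is not part of the verdict */
    if (msgbuf[0] & VT_MSGTYPE_NACK)
        printf("PF rejected the request\n");
    else
        printf("PF acknowledged the request\n");
    return 0;
}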
/**
@@ -344,7 +348,6 @@ STATIC void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
@@ -353,10 +356,7 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
memset(msgbuf, 0, 12);
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, 6);
- ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
-
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -419,17 +419,51 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
}
/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ **/
+s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ u32 msgbuf[2];
+ s32 err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ break;
+ default:
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ }
+
+ msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ return IXGBE_SUCCESS;
+}
+
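Note: ixgbevf_update_xcast_mode() gives the VF a way to ask the PF for a different multicast/promiscuous mode, gated on mailbox API 1.2 or 1.3. A hypothetical, self-contained sketch of wiring such a hook through an ops table; the enum values and struct names below are stand-ins, not the driver's own:

#include <stdio.h>

enum xcast_mode { XCAST_NONE, XCAST_MULTI, XCAST_ALLMULTI, XCAST_PROMISC };

struct vf_hw;
struct vf_mac_ops {
    int (*update_xcast_mode)(struct vf_hw *hw, int xcast_mode);
};
struct vf_hw {
    int api_version;                 /* 12 or 13 in this sketch */
    struct vf_mac_ops mac_ops;
};

static int update_xcast_mode(struct vf_hw *hw, int xcast_mode)
{
    if (hw->api_version < 12)        /* mirrors the api_12/api_13 gate */
        return -1;
    printf("asking PF for xcast mode %d\n", xcast_mode);
    return 0;
}

int main(void)
{
    struct vf_hw hw = { .api_version = 13,
                        .mac_ops = { .update_xcast_mode = update_xcast_mode } };

    if (hw.mac_ops.update_xcast_mode(&hw, XCAST_ALLMULTI))
        printf("xcast mode not supported by this PF\n");
    return 0;
}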
+/**
* ixgbe_set_vfta_vf - Set/Unset vlan filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers
* @vlan_on: if true then set bit, else clear bit
* @vlvf_bypass: boolean flag indicating updating default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 ret_val;
UNREFERENCED_2PARAMETER(vind, vlvf_bypass);
@@ -439,10 +473,7 @@ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- ret_val = mbx->ops.write_posted(hw, msgbuf, 2, 0);
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 1, 0);
-
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
return IXGBE_SUCCESS;
@@ -489,7 +520,6 @@ s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3], msgbuf_chk;
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
@@ -506,10 +536,8 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf_chk = msgbuf[0];
if (addr)
memcpy(msg_addr, addr, 6);
- ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -628,13 +656,22 @@ out:
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
+ s32 retval;
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
- ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+
+ retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (retval)
+ return retval;
+ if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_MBX;
+
+ return 0;
}
/**
@@ -651,11 +688,8 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
msg[0] = IXGBE_VF_API_NEGOTIATE;
msg[1] = api;
msg[2] = 0;
- err = hw->mbx.ops.write_posted(hw, msg, 3, 0);
-
- if (!err)
- err = hw->mbx.ops.read_posted(hw, msg, 3, 0);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -681,6 +715,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
switch (hw->api_version) {
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
break;
default:
return 0;
@@ -689,11 +724,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
/* Fetch queue configuration from the PF */
msg[0] = IXGBE_VF_GET_QUEUES;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = hw->mbx.ops.write_posted(hw, msg, 5, 0);
-
- if (!err)
- err = hw->mbx.ops.read_posted(hw, msg, 5, 0);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
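Note: with ixgbevf_rlpml_set_vf() re-typed from void to s32, a VF's max-frame-size request can now be rejected by the PF and the caller can react to it. A minimal stand-alone sketch of that caller-side check; the 9728-byte cut-off and the error value are invented for the example:

#include <stdio.h>

#define ERR_MBX (-100)

/* Pretend the PF NACKs anything above 9728 bytes. */
static int rlpml_set_vf(unsigned int max_size)
{
    return max_size > 9728 ? ERR_MBX : 0;
}

int main(void)
{
    if (rlpml_set_vf(1518))
        printf("PF refused the 1518-byte limit\n");
    if (rlpml_set_vf(16000))
        printf("PF refused the 16000-byte limit\n");
    return 0;
}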
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index 8851cb82..d288f31a 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -131,9 +131,10 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr,
bool clear);
+s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode);
s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, bool vlvf_bypass);
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
+s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 31dead0d..6e778bc9 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -271,6 +271,7 @@ mac_reset_top:
if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
@@ -783,7 +784,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw),
swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- msec_delay(5);
return IXGBE_SUCCESS;
}
/* Firmware currently using resource (fwmask), hardware
@@ -860,7 +860,7 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- msec_delay(5);
+ msec_delay(2);
}
/**
@@ -982,6 +982,9 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
DEBUGFUNC("ixgbe_blink_led_start_X540");
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/*
* Link should be up in order for the blink bit in the LED control
* register to work. Force link and speed in the MAC if link is down.
@@ -1016,6 +1019,9 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
u32 macc_reg;
u32 ledctl_reg;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
DEBUGFUNC("ixgbe_blink_led_stop_X540");
/* Restore the LED to its default value. */
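Note: both blink routines now validate the LED index before touching LEDCTL, since only four blink fields exist. A trivial stand-alone sketch of the same guard; the error value is a stand-in for IXGBE_ERR_PARAM:

#include <stdio.h>

#define ERR_PARAM (-5)

static int blink_led_start(unsigned int index)
{
    if (index > 3)
        return ERR_PARAM;
    printf("blinking LED %u\n", index);
    return 0;
}

int main(void)
{
    printf("index 2 -> %d\n", blink_led_start(2));
    printf("index 7 -> %d\n", blink_led_start(7));
    return 0;
}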
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index e78c9c2c..acb8140c 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -41,6 +41,7 @@ POSSIBILITY OF SUCH DAMAGE.
STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed);
STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask);
+STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw);
/**
* ixgbe_init_ops_X550 - Inits func ptrs and MAC type
@@ -342,11 +343,10 @@ STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
UNREFERENCED_1PARAMETER(dev_type);
/* Setup and write the read command */
- command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ |
- IXGBE_MSCA_MDI_COMMAND;
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC |
+ IXGBE_MSCA_MDI_COMMAND;
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -393,11 +393,10 @@ STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
/* Setup and write the write command */
- command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
- IXGBE_MSCA_MDI_COMMAND;
+ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
@@ -423,43 +422,6 @@ STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr,
}
/**
- * ixgbe_identify_phy_1g - Get 1g PHY type based on device id
- * @hw: pointer to hardware structure
- *
- * Returns error code
- */
-STATIC s32 ixgbe_identify_phy_1g(struct ixgbe_hw *hw)
-{
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
- u16 phy_id_high;
- u16 phy_id_low;
- s32 rc;
-
- rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
- if (rc)
- return rc;
-
- rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_HIGH, 0,
- &phy_id_high);
- if (rc)
- goto rel_out;
-
- rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_LOW, 0,
- &phy_id_low);
- if (rc)
- goto rel_out;
-
- hw->phy.id = (u32)phy_id_high << 16;
- hw->phy.id |= phy_id_low & IXGBE_PHY_REVISION_MASK;
- hw->phy.revision = (u32)phy_id_low & ~IXGBE_PHY_REVISION_MASK;
-
-rel_out:
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
-
- return rc;
-}
-
-/**
* ixgbe_identify_phy_x550em - Get PHY type based on device id
* @hw: pointer to hardware structure
*
@@ -467,18 +429,15 @@ rel_out:
*/
STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
+ hw->mac.ops.set_lan_id(hw);
+
+ ixgbe_read_mng_if_sel_x550em(hw);
+
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
- hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
- hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
- if (hw->bus.lan_id)
- hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
- else
- hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
- hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
/* Fallthrough */
@@ -494,19 +453,12 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
- case IXGBE_DEV_ID_X550EM_X_1G_T:
- case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
- return ixgbe_identify_phy_generic(hw);
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
- hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
- hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
- if (hw->bus.lan_id)
- hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
- else
- hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
- return ixgbe_identify_phy_1g(hw);
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ return ixgbe_identify_phy_generic(hw);
default:
break;
}
@@ -601,7 +553,6 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
struct ixgbe_phy_info *phy = &hw->phy;
- struct ixgbe_link_info *link = &hw->link;
s32 ret_val;
DEBUGFUNC("ixgbe_init_ops_X550EM");
@@ -637,25 +588,6 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
hw->bus.type = ixgbe_bus_type_internal;
mac->ops.get_bus_info = ixgbe_get_bus_info_X550em;
- if (hw->mac.type == ixgbe_mac_X550EM_x) {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
- mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
- mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
- link->ops.read_link = ixgbe_read_i2c_combined_generic;
- link->ops.read_link_unlocked =
- ixgbe_read_i2c_combined_generic_unlocked;
- link->ops.write_link = ixgbe_write_i2c_combined_generic;
- link->ops.write_link_unlocked =
- ixgbe_write_i2c_combined_generic_unlocked;
- link->addr = IXGBE_CS4227;
- }
- if (hw->mac.type == ixgbe_mac_X550EM_a) {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
- mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
- mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
- }
mac->ops.get_media_type = ixgbe_get_media_type_X550em;
mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em;
@@ -666,10 +598,6 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
mac->ops.setup_fc = ixgbe_setup_fc_generic;
- else if (hw->mac.type == ixgbe_mac_X550EM_a) {
- mac->ops.setup_fc = ixgbe_setup_fc_x550a;
- mac->ops.fc_autoneg = ixgbe_fc_autoneg_x550a;
- }
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
@@ -703,6 +631,88 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
}
/**
+* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers and for MAC type X550EM_a.
+* Does not touch the hardware.
+**/
+s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM_a");
+
+ /* Start with generic X550EM init */
+ ret_val = ixgbe_init_ops_X550EM(hw);
+
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ } else {
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
+ }
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
+
+ switch (mac->ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ mac->ops.setup_fc = NULL;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a;
+ break;
+ case ixgbe_media_type_backplane:
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a;
+ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a;
+ break;
+ default:
+ break;
+ }
+
+ if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
+ (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_setup_fc_sgmii_x550em_a;
+ }
+
+ return ret_val;
+}
+
+/**
+* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type
+* @hw: pointer to hardware structure
+*
+* Initialize the function pointers and for MAC type X550EM_x.
+* Does not touch the hardware.
+**/
+s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_link_info *link = &hw->link;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_init_ops_X550EM_x");
+
+ /* Start with generic X550EM init */
+ ret_val = ixgbe_init_ops_X550EM(hw);
+
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
+ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em;
+ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em;
+ link->ops.read_link = ixgbe_read_i2c_combined_generic;
+ link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked;
+ link->ops.write_link = ixgbe_write_i2c_combined_generic;
+ link->ops.write_link_unlocked =
+ ixgbe_write_i2c_combined_generic_unlocked;
+ link->addr = IXGBE_CS4227;
+
+ return ret_val;
+}
+
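Note: the single X550EM init routine is split so that X550EM_x and X550EM_a each get their own IOSF register accessors, semaphore ops and flow-control handlers on top of the common ixgbe_init_ops_X550EM(). A self-contained sketch of that layered dispatch; the enum and the init bodies are placeholders for the real routines:

#include <stdio.h>

enum mac_type { MAC_X550EM_X, MAC_X550EM_A };

static int init_ops_x550em(void)
{
    puts("common X550EM init");
    return 0;
}

static int init_ops_x550em_x(void)
{
    init_ops_x550em();
    puts("X550EM_x extras: x550 IOSF regs, CS4227 link ops");
    return 0;
}

static int init_ops_x550em_a(void)
{
    init_ops_x550em();
    puts("X550EM_a extras: x550a swfw sync, media-specific flow control");
    return 0;
}

int main(void)
{
    enum mac_type mac = MAC_X550EM_A;

    return mac == MAC_X550EM_X ? init_ops_x550em_x() : init_ops_x550em_a();
}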
+/**
* ixgbe_dmac_config_X550
* @hw: pointer to hardware structure
*
@@ -765,6 +775,7 @@ s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw)
/* Configure DMA coalescing enabled */
switch (hw->mac.dmac_config.link_speed) {
+ case IXGBE_LINK_SPEED_10_FULL:
case IXGBE_LINK_SPEED_100_FULL:
pb_headroom = IXGBE_DMACRXT_100M;
break;
@@ -895,20 +906,20 @@ STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
if (status != IXGBE_SUCCESS)
return status;
link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
+ IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
/* Don't advertise FEC capability when EEE enabled. */
link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
if (status != IXGBE_SUCCESS)
return status;
break;
@@ -950,8 +961,8 @@ STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
if (status != IXGBE_SUCCESS)
return status;
@@ -962,8 +973,8 @@ STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
if (status != IXGBE_SUCCESS)
return status;
break;
@@ -1001,6 +1012,7 @@ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
!(IXGBE_FUSES0_REV_MASK &
IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
return IXGBE_SUCCESS;
+
status = ixgbe_enable_eee_x550(hw);
if (status)
return status;
@@ -1102,8 +1114,8 @@ STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl)
}
/**
- * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
- * device
+ * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register
+ * of the IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 3 bit device type
@@ -1149,12 +1161,11 @@ out:
}
/**
- * ixgbe_read_iosf_sb_reg_x550 - Writes a value to specified register of the IOSF
- * device
+ * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 3 bit device type
- * @phy_data: Pointer to read data from the register
+ * @data: Pointer to read data from the register
**/
s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data)
@@ -1278,7 +1289,7 @@ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
write_cmd.port_number = hw->bus.lan_id;
write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
- write_cmd.write_data = IXGBE_CPU_TO_LE32(data);
+ write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
sizeof(write_cmd),
@@ -1288,8 +1299,7 @@ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
}
/**
- * ixgbe_read_iosf_sb_reg_x550a - Writes a value to specified register
- * of the IOSF device.
+ * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 3 bit device type
@@ -1318,7 +1328,7 @@ s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
IXGBE_HI_COMMAND_TIMEOUT, true);
/* Extract the register value from the response. */
- *data = IXGBE_LE32_TO_CPU(hic.rsp.read_data);
+ *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
return status;
}
@@ -1506,7 +1516,6 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
media_type = ixgbe_media_type_copper;
- hw->phy.type = ixgbe_phy_m88;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -1599,16 +1608,62 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw)
}
/**
+* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the
+* internal PHY
+* @hw: pointer to hardware structure
+**/
+STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u32 link_ctrl;
+
+ /* Restart auto-negotiation. */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
+
+ if (status) {
+ DEBUGOUT("Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ u32 flx_mask_st20;
+
+ /* Indicate to FW that AN restart has been asserted */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20);
+
+ if (status) {
+ DEBUGOUT("Auto-negotiation did not complete\n");
+ return status;
+ }
+
+ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART;
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20);
+ }
+
+ return status;
+}
+
+/**
* ixgbe_setup_sgmii - Set up link for sgmii
* @hw: pointer to hardware structure
*/
STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
- u32 lval, sval;
+ u32 lval, sval, flx_val;
s32 rc;
- UNREFERENCED_2PARAMETER(speed, autoneg_wait_to_complete);
rc = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -1641,12 +1696,100 @@ STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
if (rc)
return rc;
- lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
+ if (rc)
+ return rc;
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_setup_sgmii_m88 - Set up link for sgmii with Marvell PHYs
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval, flx_val;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
rc = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
- return rc;
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val);
+ if (rc)
+ return rc;
+
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val);
+ if (rc)
+ return rc;
+
+ rc = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
}
/**
@@ -1670,17 +1813,28 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
+
if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ||
(hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP))
mac->ops.setup_mac_link =
- ixgbe_setup_mac_link_sfp_x550a;
+ ixgbe_setup_mac_link_sfp_x550a;
else
mac->ops.setup_mac_link =
- ixgbe_setup_mac_link_sfp_x550em;
+ ixgbe_setup_mac_link_sfp_x550em;
break;
case ixgbe_media_type_copper:
- mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
- mac->ops.check_link = ixgbe_check_link_t_X550em;
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
+ mac->ops.setup_link = ixgbe_setup_sgmii_m88;
+ } else {
+ mac->ops.setup_link =
+ ixgbe_setup_mac_link_t_X550em;
+ }
+ } else {
+ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.check_link = ixgbe_check_link_t_X550em;
+ }
break;
case ixgbe_media_type_backplane:
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
@@ -1704,6 +1858,7 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
{
DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -1728,12 +1883,27 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
} else {
switch (hw->phy.type) {
case ixgbe_phy_m88:
- *speed = IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
+ *speed = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_10_FULL;
break;
case ixgbe_phy_sgmii:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
+ case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ /* check different backplane modes */
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ }
+ }
+ /* fall through */
default:
*speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
@@ -1935,8 +2105,8 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
u32 reg_val;
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
return status;
@@ -1952,181 +2122,289 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX;
- /* Restart auto-negotiation. */
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
- return status;
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ /* Set lane mode to KR auto negotiation */
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+
+ if (status)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ }
+
+ return ixgbe_restart_an_internal_phy_x550em(hw);
}
/**
- * ixgbe_set_master_slave_mode - Set up PHY for master/slave mode
+ * ixgbe_setup_m88 - setup m88 PHY
* @hw: pointer to hardware structure
- *
- * Must be called while holding the PHY semaphore and token
*/
-STATIC s32 ixgbe_set_master_slave_mode(struct ixgbe_hw *hw)
+STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
{
- u16 phy_data;
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ u16 reg;
s32 rc;
- /* Resolve master/slave mode */
- rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0,
- &phy_data);
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return IXGBE_SUCCESS;
+
+ rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (rc)
return rc;
- /* load defaults for future use */
- if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_ENABLE) {
- if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_VALUE)
- hw->phy.original_ms_type = ixgbe_ms_force_master;
- else
- hw->phy.original_ms_type = ixgbe_ms_force_slave;
- } else {
- hw->phy.original_ms_type = ixgbe_ms_auto;
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
+ if (rc)
+ goto out;
+ if (reg & IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN) {
+ reg &= ~IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
+ reg);
}
- switch (hw->phy.ms_type) {
- case ixgbe_ms_force_master:
- phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
- phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_VALUE;
- break;
- case ixgbe_ms_force_slave:
- phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
- phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_VALUE;
- break;
- case ixgbe_ms_auto:
- phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_ENABLE;
- break;
- default:
- break;
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, &reg);
+ if (rc)
+ goto out;
+ if (reg & IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN) {
+ reg &= ~IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0,
+ reg);
}
- return ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0,
- phy_data);
-}
-
-/**
- * ixgbe_reset_phy_m88_nolock - Reset m88 PHY without locking
- * @hw: pointer to hardware structure
- *
- * Must be called while holding the PHY semaphore and token
- */
-STATIC s32 ixgbe_reset_phy_m88_nolock(struct ixgbe_hw *hw)
-{
- s32 rc;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 2);
+ if (rc)
+ goto out;
- rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
+ &reg);
if (rc)
- return rc;
+ goto out;
+ if (reg & IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN) {
+ reg &= ~IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
+ reg);
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
+ 0);
+ if (rc)
+ goto out;
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
+ &reg);
+ if (rc)
+ goto out;
+ reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
+ reg);
+ usec_delay(50);
+ } else {
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
+ 0);
+ if (rc)
+ goto out;
+ }
- rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_FIBER_CTRL, 0,
- IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB);
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
if (rc)
- goto res_out;
+ goto out;
- rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
+ if (!(reg & IXGBE_M88E1500_COPPER_CTRL_AN_EN)) {
+ reg |= IXGBE_M88E1500_COPPER_CTRL_AN_EN;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
+ reg);
+ }
+
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, &reg);
if (rc)
- goto res_out;
+ goto out;
+ reg &= ~IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX;
+ reg &= ~IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ reg |= IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, reg);
- rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_GEN_CTRL, 0,
- IXGBE_M88E1500_GEN_CTRL_RESET |
- IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER);
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, &reg);
if (rc)
- goto res_out;
+ goto out;
+ reg &= ~IXGBE_M88E1500_COPPER_AN_T4;
+ reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_FD;
+ reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_HD;
+ reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_FD;
+ reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_HD;
+
+ /* Flow control auto negotiation configuration was moved from here to
+ * the function ixgbe_setup_fc_sgmii_x550em_a()
+ */
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ reg |= IXGBE_M88E1500_COPPER_AN_100TX_FD;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
+ reg |= IXGBE_M88E1500_COPPER_AN_10TX_FD;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, reg);
- rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
if (rc)
- goto res_out;
+ goto out;
+ reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
+
- rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- IXGBE_M88E1500_COPPER_CTRL_RESET |
- IXGBE_M88E1500_COPPER_CTRL_AN_EN |
- IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
- IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
- IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+ return rc;
-res_out:
- ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+out:
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ hw->mac.ops.release_swfw_sync(hw, mask);
return rc;
}
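Note: the reworked M88E1500 handling is all paged MDIO: select a register page, do the read-modify-write, and always drop back to page 0 before releasing the PHY semaphore. A self-contained toy model of that access pattern; the tiny register file stands in for the PHY, and the page-address register number follows the usual Marvell layout rather than being taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define PAGE_ADDR_REG 22

static uint16_t phy[32][32];          /* [page][reg] */
static unsigned cur_page;

static void mdi_write(unsigned reg, uint16_t val)
{
    if (reg == PAGE_ADDR_REG)
        cur_page = val & 0x1f;
    else
        phy[cur_page][reg] = val;
}

static uint16_t mdi_read(unsigned reg)
{
    return phy[cur_page][reg];
}

int main(void)
{
    uint16_t ctrl;

    mdi_write(PAGE_ADDR_REG, 1);      /* fiber page */
    ctrl = mdi_read(0);
    mdi_write(0, ctrl | 0x8000);      /* set the reset bit */
    mdi_write(PAGE_ADDR_REG, 0);      /* always leave the PHY on page 0 */

    printf("fiber ctrl now 0x%04x, back on page %u\n",
           (unsigned)phy[1][0], cur_page);
    return 0;
}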
/**
- * ixgbe_reset_phy_m88 - Reset m88 PHY
+ * ixgbe_reset_phy_m88e1500 - Reset m88e1500 PHY
* @hw: pointer to hardware structure
+ *
+ * The PHY token must be held when calling this function.
*/
-STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_phy_m88e1500(struct ixgbe_hw *hw)
{
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u16 reg;
s32 rc;
- if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
- return IXGBE_SUCCESS;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ if (rc)
+ return rc;
- rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
if (rc)
return rc;
- rc = ixgbe_reset_phy_m88_nolock(hw);
+ reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
+
+ usec_delay(10);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
return rc;
}
/**
- * ixgbe_setup_m88 - setup m88 PHY
+ * ixgbe_reset_phy_m88e1543 - Reset m88e1543 PHY
* @hw: pointer to hardware structure
+ *
+ * The PHY token must be held when calling this function.
*/
-STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_phy_m88e1543(struct ixgbe_hw *hw)
{
- u32 swfw_mask = hw->phy.phy_semaphore_mask;
- struct ixgbe_phy_info *phy = &hw->phy;
- u16 phy_data;
+ return hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+}
+
+/**
+ * ixgbe_reset_phy_m88 - Reset m88 PHY
+ * @hw: pointer to hardware structure
+ */
+STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ u16 reg;
s32 rc;
- if (phy->reset_disable || ixgbe_check_reset_blocked(hw))
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return IXGBE_SUCCESS;
- rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
if (rc)
return rc;
- rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_PHY_SPEC_CTRL, 0,
- &phy_data);
+ switch (hw->phy.id) {
+ case IXGBE_M88E1500_E_PHY_ID:
+ rc = ixgbe_reset_phy_m88e1500(hw);
+ break;
+ case IXGBE_M88E1543_E_PHY_ID:
+ rc = ixgbe_reset_phy_m88e1543(hw);
+ break;
+ default:
+ rc = IXGBE_ERR_PHY;
+ break;
+ }
+
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
if (rc)
- goto rel_out;
-
- /* Enable downshift and setting it to X6 */
- phy_data &= ~IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE;
- phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_6X;
- phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE;
- rc = ixgbe_write_phy_reg_mdi_22(hw,
- IXGBE_M88E1500_PHY_SPEC_CTRL, 0,
- phy_data);
+ goto out;
+
+ reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
+ IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
+ IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
if (rc)
- goto rel_out;
+ goto out;
- ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
+ if (rc)
+ goto out;
- /* Commit the changes */
- rc = ixgbe_reset_phy_m88_nolock(hw);
- if (rc) {
- DEBUGOUT("Error committing the PHY changes\n");
- goto rel_out;
- }
+ reg = IXGBE_M88E1500_GEN_CTRL_RESET |
+ IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_GEN_CTRL, 0, reg);
+ if (rc)
+ goto out;
- rc = ixgbe_set_master_slave_mode(hw);
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
+ if (rc)
+ goto out;
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
- return rc;
+ reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
+ IXGBE_M88E1500_FIBER_CTRL_AN_EN |
+ IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
+ IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
+ if (rc)
+ goto out;
-rel_out:
- ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ if (rc)
+ goto out;
+
+ reg = (IXGBE_M88E1500_MAC_CTRL_1_DWN_4X <<
+ IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT) |
+ (IXGBE_M88E1500_MAC_CTRL_1_ED_TM <<
+ IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT) |
+ (IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO <<
+ IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT);
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, reg);
+ if (rc)
+ goto out;
+
+ reg = IXGBE_M88E1500_COPPER_CTRL_RESET |
+ IXGBE_M88E1500_COPPER_CTRL_AN_EN |
+ IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
+ IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
+ IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB;
+ rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
+ if (rc)
+ goto out;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ /* In case of first reset set advertised speeds to default value */
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = IXGBE_LINK_SPEED_1GB_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_10_FULL;
+
+ return ixgbe_setup_m88(hw);
+
+out:
+ hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ hw->mac.ops.release_swfw_sync(hw, mask);
return rc;
}
@@ -2151,7 +2429,7 @@ STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) {
hw->phy.addr = (hw->phy.nw_mng_if_sel &
IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
- IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
}
return IXGBE_SUCCESS;
@@ -2172,16 +2450,42 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_phy_ops_X550em");
- hw->mac.ops.set_lan_id(hw);
-
- ixgbe_read_mng_if_sel_x550em(hw);
-
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em;
}
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+
+ break;
+ case IXGBE_DEV_ID_X550EM_A_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
+ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_SFP:
+ /* set up for CS4227 usage */
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
+ break;
+ default:
+ break;
+ }
+
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
@@ -2225,8 +2529,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
break;
case ixgbe_phy_m88:
phy->ops.setup_link = ixgbe_setup_m88;
- phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
- phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
phy->ops.reset = ixgbe_reset_phy_m88;
break;
default:
@@ -2511,14 +2813,63 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
}
/**
+ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode
+ * @hw: pointer to hardware structure
+ * @speed: the link speed to force
+ *
+ * Configures the integrated PHY for native SFI mode. Used to connect the
+ * internal PHY directly to an SFP cage, without autonegotiation.
+ **/
+STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ s32 status;
+ u32 reg_val;
+
+ /* Disable all AN and force speed to 10G Serial. */
+ status = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
+ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
+
+ /* Select forced link speed for internal PHY. */
+ switch (*speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ break;
+ default:
+ /* Other link speeds are not supported by internal PHY. */
+ return IXGBE_ERR_LINK_SETUP;
+ }
+
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* Toggle port SW reset by AN reset. */
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
+
+ return status;
+}
+
+/**
* ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
* @hw: pointer to hardware structure
*
* Configure the the integrated PHY for SFP support.
**/
s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 ret_val;
u16 reg_phy_ext;
@@ -2540,31 +2891,27 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
return ret_val;
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) {
- /* Configure internal PHY for native SFI */
+ /* Configure internal PHY for native SFI based on module type */
ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int);
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- if (setup_linear) {
- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
- } else {
- reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
- reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
- }
+ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA;
+ if (!setup_linear)
+ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR;
ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int);
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- /* Setup XFI/SFI internal link. */
- ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
+ /* Setup SFI internal link. */
+ ret_val = ixgbe_setup_sfi_x550a(hw, &speed);
} else {
/* Configure internal PHY for KR/KX. */
ixgbe_setup_kr_speed_x550em(hw, speed);
@@ -2577,7 +2924,7 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* Get external PHY device id */
ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
if (ret_val != IXGBE_SUCCESS)
return ret_val;
@@ -2598,7 +2945,7 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
ret_val = hw->phy.ops.write_reg(hw, reg_slice,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
}
return ret_val;
}
@@ -2611,24 +2958,25 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
**/
STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
{
+ struct ixgbe_mac_info *mac = &hw->mac;
s32 status;
u32 reg_val;
/* Disable training protocol FSM. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Disable Flex from training TXFFE. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -2636,12 +2984,12 @@ STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -2649,14 +2997,14 @@ STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN;
reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Enable override for coefficients. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -2665,7 +3013,7 @@ STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN;
reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN;
reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
@@ -2681,11 +3029,12 @@ STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw)
**/
STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
{
+ struct ixgbe_mac_info *mac = &hw->mac;
s32 status;
u32 reg_val;
/* Disable AN and force speed to 10G Serial. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
@@ -2707,7 +3056,7 @@ STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return IXGBE_ERR_LINK_SETUP;
}
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = mac->ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
@@ -2721,15 +3070,7 @@ STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
}
/* Toggle port SW reset by AN reset. */
- status = ixgbe_read_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- if (status != IXGBE_SUCCESS)
- return status;
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
return status;
}
@@ -2847,56 +3188,56 @@ s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw)
/* Disable AN and force speed to 10G Serial. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set near-end loopback clocks. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B;
reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Set loopback enable. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
if (status != IXGBE_SUCCESS)
return status;
/* Training bypass. */
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status != IXGBE_SUCCESS)
return status;
reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS;
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
return status;
}
@@ -2969,6 +3310,7 @@ s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
DEBUGOUT("EEPROM read buffer - semaphore failed\n");
return status;
}
+
while (words) {
if (words > FW_MAX_READ_BUFFER_SIZE / 2)
words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
@@ -3712,19 +4054,19 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
ret_val = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (ret_val != IXGBE_SUCCESS)
goto out;
reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
if (pause)
reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
if (asm_dir)
reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
ret_val = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = true;
@@ -3738,12 +4080,12 @@ out:
}
/**
- * ixgbe_fc_autoneg_x550a - Enable flow control IEEE clause 37
+ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37
* @hw: pointer to hardware structure
*
* Enable flow control according to IEEE clause 37.
**/
-void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
+void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw)
{
u32 link_s1, lp_an_page_low, an_cntl_1;
s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
@@ -3757,7 +4099,7 @@ void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
*/
if (hw->fc.disable_fc_autoneg) {
ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "Flow control autoneg is disabled");
+ "Flow control autoneg is disabled");
goto out;
}
@@ -3769,12 +4111,13 @@ void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
/* Check if auto-negotiation has completed */
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_S1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
+ IXGBE_KRM_LINK_S1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1);
if (status != IXGBE_SUCCESS ||
(link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) {
DEBUGOUT("Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
@@ -3782,8 +4125,8 @@ void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
* local flow control settings accordingly
*/
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("Auto-Negotiation did not complete\n");
@@ -3791,8 +4134,8 @@ void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw)
}
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
+ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("Auto-Negotiation did not complete\n");
@@ -3815,22 +4158,179 @@ out:
}
/**
- * ixgbe_setup_fc_x550em - Set up flow control
+ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings
+ * @hw: pointer to hardware structure
+ *
+ **/
+void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
+{
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+}
+
+/**
+ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37
+ * @hw: pointer to hardware structure
+ *
+ * Enable flow control according to IEEE clause 37.
+ **/
+void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ u16 reg, pcs_an_lp, pcs_an;
+ ixgbe_link_speed speed;
+ bool link_up;
+
+ /* AN should have completed when the cable was plugged in.
+ * Look for reasons to bail out. Bail out if:
+ * - FC autoneg is disabled, or if
+ * - link is not up.
+ */
+ if (hw->fc.disable_fc_autoneg) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "Flow control autoneg is disabled");
+ goto out;
+ }
+
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_STATUS,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
+ if (status != IXGBE_SUCCESS ||
+ (reg & IXGBE_M88E1500_COPPER_STATUS_AN_DONE) == 0) {
+ DEBUGOUT("Auto-Negotiation did not complete\n");
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Get the advertized flow control */
+ status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /* Get link partner's flow control */
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_M88E1500_COPPER_AN_LP_ABILITY,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an_lp);
+ if (status != IXGBE_SUCCESS)
+ goto out;
+
+ /* Negotiate the flow control */
+ status = ixgbe_negotiate_fc(hw, (u32)pcs_an, (u32)pcs_an_lp,
+ IXGBE_M88E1500_COPPER_AN_PAUSE,
+ IXGBE_M88E1500_COPPER_AN_AS_PAUSE,
+ IXGBE_M88E1500_COPPER_AN_LP_PAUSE,
+ IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE);
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_setup_fc_sgmii_x550em_a - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ **/
+s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw)
+{
+ u16 reg;
+ s32 rc;
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Read contents of the Auto-Negotiation register, page 0 reg 4 */
+ rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
+ if (rc)
+ goto out;
+
+ /* Disable all the settings related to Flow control Auto-negotiation */
+ reg &= ~IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
+ reg &= ~IXGBE_M88E1500_COPPER_AN_PAUSE;
+
+ /* Configure the Asymmetric and symmetric pause according to the user
+ * requested mode.
+ */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
+ reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
+ reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* Write back to the Auto-Negotiation register with newly configured
+ * fields
+ */
+ hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_AN,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg);
+
+ /* In this section of the code we restart Auto-negotiation */
+
+ /* Read the CONTROL register, Page 0 reg 0 */
+ rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
+ if (rc)
+ goto out;
+
+ /* Set the bit to restart Auto-Neg. The bit to enable Auto-neg is ON
+ * by default
+ */
+ reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
+
+ /* write the new values to the register to restart Auto-Negotiation */
+ hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
+ IXGBE_MDIO_ZERO_DEV_TYPE, reg);
+
+out:
+ return rc;
+}
+
+/**
+ * ixgbe_setup_fc_backplane_x550em_a - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
- u32 an_cntl, link_ctrl = 0;
+ u32 an_cntl = 0;
- DEBUGFUNC("ixgbe_setup_fc_x550em");
+ DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a");
/* Validate the requested mode */
if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
return IXGBE_ERR_INVALID_LINK_SETTINGS;
}
@@ -3842,8 +4342,8 @@ s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl);
if (status != IXGBE_SUCCESS) {
DEBUGOUT("Auto-Negotiation did not complete\n");
@@ -3884,7 +4384,7 @@ s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw)
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
- IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
break;
default:
ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
@@ -3893,23 +4393,11 @@ s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw)
}
status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl);
/* Restart auto-negotiation. */
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl);
-
- if (status != IXGBE_SUCCESS) {
- DEBUGOUT("Auto-Negotiation did not complete\n");
- return status;
- }
-
- link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl);
+ status = ixgbe_restart_an_internal_phy_x550em(hw);
return status;
}
@@ -4006,7 +4494,6 @@ STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync_X540(hw, hmask);
if (status != IXGBE_ERR_TOKEN_RETRY)
return status;
- msec_delay(FW_PHY_TOKEN_DELAY);
}
return status;
@@ -4043,7 +4530,7 @@ STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
* instances.
**/
s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
+ u32 device_type, u16 *phy_data)
{
s32 status;
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
@@ -4071,7 +4558,7 @@ s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
* The PHY Token is needed since the MDIO is shared between two MAC instances.
**/
s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+ u32 device_type, u16 phy_data)
{
s32 status;
u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
@@ -4079,7 +4566,7 @@ s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
DEBUGFUNC("ixgbe_write_phy_reg_x550a");
if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) {
- status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type,
phy_data);
hw->mac.ops.release_swfw_sync(hw, mask);
} else {
@@ -4268,4 +4755,3 @@ s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
return IXGBE_SUCCESS;
}
-
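
Both groups of hunks above follow the same read-modify-write pattern over the IOSF sideband, now routed through the mac->ops function pointers instead of the device-specific ixgbe_read/write_iosf_sb_reg_x550() calls, and the removed AN-restart sequence is collapsed into ixgbe_restart_an_internal_phy_x550em(). A minimal sketch of that pattern, assuming the new helper simply wraps the sequence it replaces (s32/u32 and the IXGBE_* macros come from the driver's base headers):

static s32 restart_an_sketch(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 reg_val;
	s32 status;

	/* read the KRM link control register over the IOSF sideband
	 * through the ops table instead of a device-specific call
	 */
	status = mac->ops.read_iosf_sb_reg(hw,
			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
			IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
	if (status != IXGBE_SUCCESS)
		return status;

	/* set the AN restart bit and write the register back */
	reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
	return mac->ops.write_iosf_sb_reg(hw,
			IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
			IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
}
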
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index 1d4b290c..cd4db29c 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -36,49 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include "ixgbe_type.h"
-/* More phy definitions */
-#define IXGBE_M88E1500_COPPER_CTRL 0x0/* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_CTRL_RESET 0x8000
-#define IXGBE_M88E1500_COPPER_CTRL_AN_EN 0x1000
-#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN 0x0200
-#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX 0x0100
-#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB 0x0040
-#define IXGBE_M88E1500_1000T_CTRL 0x09 /* 1000Base-T Ctrl Reg */
-/* 1=Configure PHY as Master 0=Configure PHY as Slave */
-#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE 0x0800
-/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
-#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE 0x1000
-#define IXGBE_M88E1500_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
-#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
-#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
-#define IXGBE_M88E1500_STATUS_LINK 0x0004 /* Interface Link Bit */
-#define IXGBE_M88E1500_MAC_CTRL_1 0x10
-#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
-#define IXGBE_M88E1500_CFG_REG_1 0x0010
-#define IXGBE_M88E1500_CFG_REG_2 0x0011
-#define IXGBE_M88E1500_CFG_REG_3 0x0007
-#define IXGBE_M88E1500_MODE 0x0014
-#define IXGBE_M88E1500_PAGE_ADDR 0x16/* Page Offset reg */
-#define IXGBE_M88E1500_FIBER_CTRL 0x0/* Page 1 reg */
-#define IXGBE_M88E1500_FIBER_CTRL_RESET 0x8000
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB 0x2000
-#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN 0x0800
-#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL 0x0100
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB 0x0040
-#define IXGBE_M88E1500_EEE_CTRL_1 0x0/* Page 18 reg */
-#define IXGBE_M88E1500_EEE_CTRL_1_MS 0x0001/* EEE Master/Slave */
-#define IXGBE_M88E1500_GEN_CTRL 0x14/* Page 18 reg */
-#define IXGBE_M88E1500_GEN_CTRL_RESET 0x8000
-#define IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER 0x0001/* Mode bits 0-2 */
-
-/* M88E1500 Specific Registers */
-#define IXGBE_M88E1500_PHY_SPEC_CTRL 0x10 /* PHY Specific Ctrl Reg */
-#define IXGBE_M88E1500_PHY_SPEC_STATUS 0x11 /* PHY Specific Stat Reg */
-
-#define IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE 0x0800
-#define IXGBE_M88E1500_PSCR_DOWNSHIFT_MASK 0x7000
-#define IXGBE_M88E1500_PSCR_DOWNSHIFT_6X 0x5000
-
s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw);
s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw);
@@ -142,14 +99,18 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
+ u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
-s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw);
-void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw);
+ u32 device_type, u16 phy_data);
+s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw);
s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index e1029301..edc9b22c 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -72,6 +72,8 @@
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
+#include "rte_pmd_ixgbe.h"
+
/*
* High threshold controlling when to start sending XOFF frames. Must be at
* least 8 bytes less than receive packet buffer size. This value is in units
@@ -429,23 +431,80 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
* The set of PCI devices this driver supports
*/
static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+ { .vendor_id = 0, /* sentinel */ },
};
-
/*
* The set of PCI devices this driver supports (for 82599 VF)
*/
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
-
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -1505,10 +1564,11 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_ixgbe_pmd = {
.pci_drv = {
- .name = "rte_ixgbe_pmd",
.id_table = pci_id_ixgbe_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_ixgbe_dev_init,
.eth_dev_uninit = eth_ixgbe_dev_uninit,
@@ -1520,43 +1580,16 @@ static struct eth_driver rte_ixgbe_pmd = {
*/
static struct eth_driver rte_ixgbevf_pmd = {
.pci_drv = {
- .name = "rte_ixgbevf_pmd",
.id_table = pci_id_ixgbevf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_ixgbevf_dev_init,
.eth_dev_uninit = eth_ixgbevf_dev_uninit,
.dev_private_size = sizeof(struct ixgbe_adapter),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_ixgbe_pmd);
- return 0;
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
- */
-static int
-rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_ixgbevf_pmd);
- return 0;
-}
-
static int
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
@@ -1944,6 +1977,8 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ break;
case ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
@@ -1979,11 +2014,9 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
switch (dev_conf->txmode.mq_mode) {
case ETH_MQ_TX_VMDQ_DCB:
- /* DCB VMDQ in SRIOV mode, not implement yet */
- PMD_INIT_LOG(ERR, "SRIOV is active,"
- " unsupported VMDQ mq_mode tx %d.",
- dev_conf->txmode.mq_mode);
- return -EINVAL;
+ PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ break;
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
break;
@@ -2238,6 +2271,36 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ixgbe_vlan_offload_set(dev, mask);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ ixgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ /* Configure DCB hw */
+ ixgbe_configure_dcb(dev);
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = ixgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
+ /* Restore vf rate limit */
+ if (vfinfo != NULL) {
+ for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+ for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+ if (vfinfo[vf].tx_rate[idx] != 0)
+ ixgbe_set_vf_rate_limit(dev, vf,
+ vfinfo[vf].tx_rate[idx],
+ 1 << idx);
+ }
+
+ ixgbe_restore_statistics_mapping(dev);
+
err = ixgbe_dev_rxtx_start(dev);
if (err < 0) {
PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -2323,36 +2386,6 @@ skip_link_setup:
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK;
- ixgbe_vlan_offload_set(dev, mask);
-
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
- /* Enable vlan filtering for VMDq */
- ixgbe_vmdq_vlan_hw_filter_enable(dev);
- }
-
- /* Configure DCB hw */
- ixgbe_configure_dcb(dev);
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
- err = ixgbe_fdir_configure(dev);
- if (err)
- goto error;
- }
-
- /* Restore vf rate limit */
- if (vfinfo != NULL) {
- for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
- for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
- if (vfinfo[vf].tx_rate[idx] != 0)
- ixgbe_set_vf_rate_limit(dev, vf,
- vfinfo[vf].tx_rate[idx],
- 1 << idx);
- }
-
- ixgbe_restore_statistics_mapping(dev);
-
return 0;
error:
@@ -3414,7 +3447,7 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
PMD_INIT_LOG(INFO, " Port %d: Link Down",
(int)(dev->data->port_id));
}
- PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+ PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
dev->pci_dev->addr.domain,
dev->pci_dev->addr.bus,
dev->pci_dev->addr.devid,
@@ -3526,7 +3559,7 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
@@ -4015,6 +4048,38 @@ ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
ixgbe_add_rar(dev, addr, 0, 0);
}
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+ IXGBE_RAH_AV);
+ }
+ return -EINVAL;
+}
+
static int
ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
@@ -4608,6 +4673,216 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
return ret;
}
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+
+ mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+ mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > 4095)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ if (vlan_id) {
+ ctrl = vlan_id;
+ ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+ } else {
+ ctrl = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ int i;
+ int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ for (i = 0; i <= num_queues; i++) {
+ reg_value = IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ (on & IXGBE_QDE_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ /* only support VFs 0 to 63 */
+ if ((vf >= dev_info.max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+ if (on)
+ reg_value |= IXGBE_SRRCTL_DROP_EN;
+ else
+ reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs and in an SRIOV configuration
+ * those queues are assigned to VFs, so the RXDCTL
+ * registers will be dealing with queues that are
+ * assigned to VFs.
+ * For example, with SRIOV configured for 31 VFs, the
+ * first 124 queues (0-123) are allocated to the VFs and only
+ * the last 4 queues (124-127) are assigned to the PF.
+ */
+
+ queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
+
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
@@ -7240,51 +7515,12 @@ ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
return ret;
}
-/* ixgbevf_update_xcast_mode - Update Multicast mode
- * @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
- * @xcast_mode: new multicast mode
- *
- * Updates the Multicast Mode of VF.
- */
-static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
- int xcast_mode)
-{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 msgbuf[2];
- s32 err;
-
- switch (hw->api_version) {
- case ixgbe_mbox_api_12:
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
- msgbuf[1] = xcast_mode;
-
- err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
- if (err)
- return err;
-
- err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
- if (err)
- return err;
-
- msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
- return -EPERM;
-
- return 0;
-}
-
static void
ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
}
static void
@@ -7292,7 +7528,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
}
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
@@ -7305,7 +7541,7 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
/* PF reset VF event */
if (in_msg == IXGBE_PF_CONTROL_MSG)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
}
static int
@@ -7356,17 +7592,7 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
ixgbevf_dev_interrupt_action(dev);
}
-static struct rte_driver rte_ixgbe_driver = {
- .type = PMD_PDEV,
- .init = rte_ixgbe_pmd_init,
-};
-
-static struct rte_driver rte_ixgbevf_driver = {
- .type = PMD_PDEV,
- .init = rte_ixgbevf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_ixgbe_driver, ixgbe);
-DRIVER_REGISTER_PCI_TABLE(ixgbe, pci_id_ixgbe_map);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver, ixgbevf);
-DRIVER_REGISTER_PCI_TABLE(ixgbevf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
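
The new rte_pmd_ixgbe_set_vf_vlan_stripq() above derives each VF's queue range from queues_per_pool = vmdq_queue_num / max_vmdq_pools. A small sketch of that mapping from outside the driver (hypothetical helper name, error handling omitted):

#include <rte_ethdev.h>

/* Hypothetical helper mirroring the queues_per_pool arithmetic used in
 * rte_pmd_ixgbe_set_vf_vlan_stripq(): returns the first RX queue index of
 * a VF's pool and, optionally, the number of queues in that pool.
 */
static uint32_t
vf_first_queue(uint8_t port, uint16_t vf, uint16_t *nb_queues)
{
	struct rte_eth_dev_info dev_info;
	uint16_t queues_per_pool;

	rte_eth_dev_info_get(port, &dev_info);
	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
	if (nb_queues != NULL)
		*nb_queues = queues_per_pool;
	return (uint32_t)vf * queues_per_pool;
}
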
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 56393ff2..26395e41 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,7 @@
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
#define IXGBE_MAX_VFTA (128)
#define IXGBE_VF_MSG_SIZE_DEFAULT 1
@@ -660,6 +661,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+ struct rte_pmd_ixgbe_mb_event_param cb_param;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
if (retval) {
@@ -674,27 +676,54 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
+ /**
+ * initialise the structure sent to the user application;
+ * the user's response comes back in the retval field
+ */
+ cb_param.retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
+ cb_param.vfid = vf;
+ cb_param.msg_type = msgbuf[0] & 0xFFFF;
+ cb_param.msg = (void *)msgbuf;
+
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
vfinfo[vf].clear_to_send = true;
+
+ /* notify application about VF reset */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
return ret;
}
+ /**
+ * ask the user application whether we are allowed to perform these functions:
+ * if cb_param.retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED,
+ * proceed as usual;
+ * if RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK, do nothing and send an ACK to the VF;
+ * if RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK, do nothing and send a NAK to the VF
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+
+ retval = cb_param.retval;
+
/* check & process VF to PF mailbox message */
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
- retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
break;
case IXGBE_VF_SET_MULTICAST:
- retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
break;
case IXGBE_VF_SET_VLAN:
- retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
break;
case IXGBE_VF_API_NEGOTIATE:
retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
@@ -704,7 +733,8 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
break;
case IXGBE_VF_UPDATE_XCAST_MODE:
- retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
+ if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+ retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
break;
default:
PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
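
With the change above, every VF mailbox request is first offered to the application through an RTE_ETH_EVENT_VF_MBOX callback carrying a struct rte_pmd_ixgbe_mb_event_param. A minimal sketch of the PF-side hook, assuming the DPDK 16.11 convention that the event parameter is delivered through the callback's cb_arg pointer:

#include <rte_common.h>
#include <rte_ethdev.h>
#include "rte_pmd_ixgbe.h"

/* Illustrative callback: let every VF request through; set
 * RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK instead to veto a request
 * (p->msg_type identifies it, values defined in base/ixgbe_mbx.h).
 */
static void
vf_mbox_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	struct rte_pmd_ixgbe_mb_event_param *p = param;

	RTE_SET_USED(port_id);
	if (type != RTE_ETH_EVENT_VF_MBOX || p == NULL)
		return;
	p->retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
}

/* registration, e.g. after rte_eth_dev_configure():
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
 *                               vf_mbox_event_cb, NULL);
 */
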
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 8a306b06..b2d9f454 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* Copyright 2014 6WIND S.A.
* All rights reserved.
*
@@ -58,7 +58,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -1345,7 +1344,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
* Bit 30: L4I, L4I integrity error
*/
static uint64_t error_to_pkt_flags_map[4] = {
- 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
};
pkt_flags = error_to_pkt_flags_map[(rx_status >>
@@ -3312,15 +3313,16 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
/**
* ixgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
+ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -3339,10 +3341,17 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw,
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- /* Disable drop for all queues */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < 128; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < 128; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE));
+ }
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
@@ -3377,7 +3386,7 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw, dcb_config);
+ ixgbe_dcb_tx_hw_config(dev, dcb_config);
}
static void
@@ -3660,7 +3669,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/*get DCB TX configuration parameters from rte_eth_conf*/
ixgbe_dcb_tx_config(dev, dcb_config);
/*Configure general DCB TX parameters*/
- ixgbe_dcb_tx_hw_config(hw, dcb_config);
+ ixgbe_dcb_tx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
@@ -3809,7 +3818,7 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev)
(dev_conf->rxmode.mq_mode != ETH_MQ_RX_DCB_RSS))
return;
- if (dev->data->nb_rx_queues != ETH_DCB_NUM_QUEUES)
+ if (dev->data->nb_rx_queues > ETH_DCB_NUM_QUEUES)
return;
/** Configure DCB hardware **/
@@ -4081,12 +4090,13 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
case ETH_MQ_RX_VMDQ_RSS:
ixgbe_config_vf_rss(dev);
break;
-
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
case ETH_MQ_RX_VMDQ_DCB:
+ ixgbe_vmdq_dcb_configure(dev);
+ break;
+ /* FIXME if support DCB/RSS together with VMDq & SRIOV */
case ETH_MQ_RX_VMDQ_DCB_RSS:
PMD_INIT_LOG(ERR,
- "Could not support DCB with VMDq & SRIOV");
+ "Could not support DCB/RSS with VMDq & SRIOV");
return -1;
default:
ixgbe_config_vf_default(dev);
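
For reference, the remapped checksum table above reads as a plain lookup on the two descriptor error bits (bit 31 = IPE, bit 30 = L4I per the driver comment); a scalar sketch with a hypothetical helper name:

#include <stdint.h>
#include <rte_mbuf.h>

/* Scalar equivalent of the error_to_pkt_flags_map lookup: with the change
 * above, "no error" now reports explicit *_CKSUM_GOOD flags instead of 0.
 */
static uint64_t
csum_error_bits_to_flags(uint32_t rx_status)
{
	static const uint64_t map[4] = {
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,	/* 00: none */
		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,	/* 01: L4I  */
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,	/* 10: IPE  */
		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,	/* 11: both */
	};

	return map[(rx_status >> 30) & 0x3];
}
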
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index 3c3c0095..a3473b98 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -321,12 +321,8 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
- /*
- * - no csum error report support
- * - no header split support
- */
- if (rxmode->hw_ip_checksum == 1 ||
- rxmode->header_split == 1)
+ /* no header split support */
+ if (rxmode->header_split == 1)
return -1;
return 0;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index 64a329ea..f96cc85c 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -556,5 +556,11 @@ ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
int __attribute__((cold))
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+ /* no csum error report support */
+ if (rxmode->hw_ip_checksum == 1)
+ return -1;
+
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
}
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index 7fb155a4..abbf2841 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -145,7 +145,7 @@ static inline void
desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
struct rte_mbuf **rx_pkts)
{
- __m128i ptype0, ptype1, vtag0, vtag1;
+ __m128i ptype0, ptype1, vtag0, vtag1, csum;
union {
uint16_t e[4];
uint64_t dword;
@@ -156,24 +156,45 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
0x0000, 0x0000, 0x0000, 0x0000,
0x000F, 0x000F, 0x000F, 0x000F);
+ /* mask the lower byte of ol_flags */
+ const __m128i ol_flags_msk = _mm_set_epi16(
+ 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x00FF, 0x00FF, 0x00FF, 0x00FF);
+
/* map rss type to rss hash flag */
const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
0, 0, 0, PKT_RX_RSS_HASH,
PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
- /* mask everything except vlan present bit */
- const __m128i vlan_msk = _mm_set_epi16(
- 0x0000, 0x0000,
- 0x0000, 0x0000,
- IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
- IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
- /* map vlan present (0x8) to ol_flags */
- const __m128i vlan_map = _mm_set_epi8(
+ /* mask everything except vlan present and l4/ip csum error */
+ const __m128i vlan_csum_msk = _mm_set_epi16(
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ (IXGBE_RXDADV_ERR_TCPE | IXGBE_RXDADV_ERR_IPE) >> 16,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP,
+ IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP);
+ /* map vlan present (0x8), IPE (0x2), L4E (0x1) to ol_flags */
+ const __m128i vlan_csum_map_lo = _mm_set_epi8(
+ 0, 0, 0, 0,
+ vlan_flags | PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ vlan_flags | PKT_RX_IP_CKSUM_BAD,
+ vlan_flags | PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ vlan_flags | PKT_RX_IP_CKSUM_GOOD,
+ 0, 0, 0, 0,
+ PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD);
+
+ const __m128i vlan_csum_map_hi = _mm_set_epi8(
0, 0, 0, 0,
- 0, 0, 0, vlan_flags,
+ 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t),
0, 0, 0, 0,
- 0, 0, 0, 0);
+ 0, PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t), 0,
+ PKT_RX_L4_CKSUM_GOOD >> sizeof(uint8_t));
ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
@@ -185,8 +206,26 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
- vtag1 = _mm_and_si128(vtag1, vlan_msk);
- vtag1 = _mm_shuffle_epi8(vlan_map, vtag1);
+ vtag1 = _mm_and_si128(vtag1, vlan_csum_msk);
+
+ /* csum bits are in the most significant bits; to use a shuffle we need to
+ * shift them down, changing the mask from 0xc000 to 0x0003.
+ */
+ csum = _mm_srli_epi16(vtag1, 14);
+
+ /* now or the most significant 64 bits containing the checksum
+ * flags with the vlan present flags.
+ */
+ csum = _mm_srli_si128(csum, 8);
+ vtag1 = _mm_or_si128(csum, vtag1);
+
+ /* convert VP, IPE, L4E to ol_flags */
+ vtag0 = _mm_shuffle_epi8(vlan_csum_map_hi, vtag1);
+ vtag0 = _mm_slli_epi16(vtag0, sizeof(uint8_t));
+
+ vtag1 = _mm_shuffle_epi8(vlan_csum_map_lo, vtag1);
+ vtag1 = _mm_and_si128(vtag1, ol_flags_msk);
+ vtag1 = _mm_or_si128(vtag0, vtag1);
vtag1 = _mm_or_si128(ptype0, vtag1);
vol.dword = _mm_cvtsi128_si64(vtag1);
@@ -210,7 +249,6 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
* - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
* numbers of DD bit
* - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
- * - don't support ol_flags for rss and csum err
*/
static inline uint16_t
_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
@@ -243,7 +281,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*/
rxdp = rxq->rx_ring + rxq->rx_tail;
- _mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+ rte_prefetch0(rxdp);
/* See if we need to rearm the RX queue - gives the prefetch a bit
* of time to act
@@ -428,7 +466,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
* - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
* numbers of DD bit
* - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two
- * - don't support ol_flags for rss and csum err
*/
uint16_t
ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -441,7 +478,6 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
* vPMD receive routine that reassembles scattered packets
*
* Notice:
- * - don't support ol_flags for rss and csum err
* - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet
* - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
* numbers of DD bit
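
One subtlety in the vector path above: PKT_RX_L4_CKSUM_GOOD does not fit in the single byte a PSHUFB lane produces, so the high shuffle map stores it pre-shifted right by one and the result is shifted back with _mm_slli_epi16() before being OR-ed with the low-byte map. A scalar model of one lane's lookup, with hypothetical names:

#include <stdint.h>

/* idx is the per-lane VP/IPE/L4E lookup index; map_lo/map_hi are the two
 * byte tables. The hi byte was stored pre-shifted right by one, so shift
 * it back before OR-ing with the low byte, as the SSE code does.
 */
static uint16_t
model_lane_ol_flags(uint8_t idx, const uint8_t map_lo[16],
		    const uint8_t map_hi[16])
{
	return (uint16_t)(map_hi[idx & 0x0f] << 1) | map_lo[idx & 0x0f];
}
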
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
new file mode 100644
index 00000000..c2fb8261
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -0,0 +1,204 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file rte_pmd_ixgbe.h
+ * ixgbe PMD specific functions.
+ *
+ **/
+
+#ifndef _PMD_IXGBE_H_
+#define _PMD_IXGBE_H_
+
+#include <rte_ethdev.h>
+
+/**
+ * Set the VF MAC address.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan insert
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable tx loopback
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable tx loopback.
+ * 0 - Disable tx loopback.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on);
+
+/**
+ * set all queues drop enable bit
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - set the queue drop enable bit for all pools.
+ * 0 - reset the queue drop enable bit for all pools.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on);
+
+/**
+ * set drop enable bit in the VF split rx control register
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - set the drop enable bit in the split rx control register.
+ * 0 - reset the drop enable bit in the split rx control register.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+
+int rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable vf vlan strip for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Response sent back to ixgbe driver from user app after callback
+ */
+enum rte_pmd_ixgbe_mb_event_rsp {
+ RTE_PMD_IXGBE_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_IXGBE_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_IXGBE_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_IXGBE_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_ixgbe_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, defined in ixgbe_mbx.h */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+};
+#endif /* _PMD_IXGBE_H_ */
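
The controls declared above are plain C calls made on the PF port; a minimal usage sketch (port, VF id, MAC address and VLAN id are placeholders, error handling trimmed):

#include <rte_ethdev.h>
#include "rte_pmd_ixgbe.h"

/* Illustrative PF-side setup for a single VF. */
static int
configure_vf_example(uint8_t port, uint16_t vf)
{
	struct ether_addr mac = { .addr_bytes =
		{ 0x02, 0x00, 0x00, 0x00, 0x00, (uint8_t)vf } };
	int ret;

	ret = rte_pmd_ixgbe_set_vf_mac_addr(port, vf, &mac);
	if (ret != 0)
		return ret;
	ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(port, vf, 1);
	if (ret != 0)
		return ret;
	ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(port, vf, 1);
	if (ret != 0)
		return ret;
	ret = rte_pmd_ixgbe_set_vf_vlan_insert(port, vf, 100);
	if (ret != 0)
		return ret;
	return rte_pmd_ixgbe_set_vf_vlan_stripq(port, vf, 1);
}
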
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index ef353984..92434f3f 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -2,3 +2,16 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_16.11 {
+ global:
+
+ rte_pmd_ixgbe_set_all_queues_drop_en;
+ rte_pmd_ixgbe_set_tx_loopback;
+ rte_pmd_ixgbe_set_vf_mac_addr;
+ rte_pmd_ixgbe_set_vf_mac_anti_spoof;
+ rte_pmd_ixgbe_set_vf_split_drop_en;
+ rte_pmd_ixgbe_set_vf_vlan_anti_spoof;
+ rte_pmd_ixgbe_set_vf_vlan_insert;
+ rte_pmd_ixgbe_set_vf_vlan_stripq;
+} DPDK_2.0;
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 9f276192..da61a856 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -2995,25 +2995,20 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
if (rxq->csum)
ol_flags |=
- TRANSPOSE(~flags,
+ TRANSPOSE(flags,
IBV_EXP_CQ_RX_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_BAD) |
- TRANSPOSE(~flags,
+ PKT_RX_IP_CKSUM_GOOD) |
+ TRANSPOSE(flags,
IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_BAD);
- /*
- * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
- * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
- * (its value is 0).
- */
+ PKT_RX_L4_CKSUM_GOOD);
if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
ol_flags |=
- TRANSPOSE(~flags,
+ TRANSPOSE(flags,
IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_BAD) |
- TRANSPOSE(~flags,
+ PKT_RX_IP_CKSUM_GOOD) |
+ TRANSPOSE(flags,
IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_BAD);
+ PKT_RX_L4_CKSUM_GOOD);
return ol_flags;
}
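The hunk above replaces the inverted-flag PKT_RX_*_CKSUM_BAD logic with a direct mapping onto the new PKT_RX_*_CKSUM_GOOD flags. A minimal sketch of the bit-transpose idea follows, assuming both masks are single-bit powers of two; the driver's actual TRANSPOSE macro may be defined differently.

#include <stdint.h>

/*
 * Illustrative helper, not the driver's macro: move the bit selected by
 * "from" in "val" to the bit position selected by "to".
 */
static inline uint32_t
transpose_bit(uint32_t val, uint32_t from, uint32_t to)
{
	return (from >= to) ?
	       ((val & from) / (from / to)) :
	       ((val & from) * (to / from));
}

/*
 * With the change above, a set IBV_EXP_CQ_RX_IP_CSUM_OK bit yields
 * PKT_RX_IP_CKSUM_GOOD directly, instead of deriving PKT_RX_IP_CKSUM_BAD
 * from the inverted completion flags.
 */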
@@ -5448,7 +5443,7 @@ mlx4_dev_link_status_handler(void *arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/**
@@ -5471,7 +5466,7 @@ mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/**
@@ -5544,7 +5539,7 @@ static struct eth_driver mlx4_driver;
* 0 on success, negative errno value on failure.
*/
static int
-mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
struct ibv_device **list;
struct ibv_device *ibv_dev;
@@ -5803,7 +5798,7 @@ mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
snprintf(name, sizeof(name), "%s port %u",
ibv_get_device_name(ibv_dev), port);
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+ eth_dev = rte_eth_dev_allocate(name);
}
if (eth_dev == NULL) {
ERROR("can not allocate rte ethdev");
@@ -5911,9 +5906,11 @@ static const struct rte_pci_id mlx4_pci_id_map[] = {
static struct eth_driver mlx4_driver = {
.pci_drv = {
- .name = MLX4_DRIVER_NAME,
+ .driver = {
+ .name = MLX4_DRIVER_NAME
+ },
.id_table = mlx4_pci_id_map,
- .devinit = mlx4_pci_devinit,
+ .probe = mlx4_pci_probe,
.drv_flags = RTE_PCI_DRV_INTR_LSC,
},
.dev_private_size = sizeof(struct priv)
@@ -5922,12 +5919,10 @@ static struct eth_driver mlx4_driver = {
/**
* Driver initialization routine.
*/
-static int
-rte_mlx4_pmd_init(const char *name, const char *args)
+RTE_INIT(rte_mlx4_pmd_init);
+static void
+rte_mlx4_pmd_init(void)
{
- (void)name;
- (void)args;
-
RTE_BUILD_BUG_ON(sizeof(wr_id_t) != sizeof(uint64_t));
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
@@ -5938,13 +5933,7 @@ rte_mlx4_pmd_init(const char *name, const char *args)
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
ibv_fork_init();
rte_eal_pci_register(&mlx4_driver.pci_drv);
- return 0;
}
-static struct rte_driver rte_mlx4_driver = {
- .type = PMD_PDEV,
- .init = rte_mlx4_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_mlx4_driver, mlx4);
-DRIVER_REGISTER_PCI_TABLE(mlx4, mlx4_pci_id_map);
+RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index d0c7bc29..4c7505e2 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -96,7 +96,7 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO = 0x1007,
};
-#define MLX4_DRIVER_NAME "librte_pmd_mlx4"
+#define MLX4_DRIVER_NAME "net_mlx4"
/* Bit-field manipulation. */
#define BITFIELD_DECLARE(bf, type, size) \
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 9448374e..90cc35e4 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -355,7 +355,7 @@ static struct eth_driver mlx5_driver;
* 0 on success, negative errno value on failure.
*/
static int
-mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
struct ibv_device **list;
struct ibv_device *ibv_dev;
@@ -511,7 +511,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
- err = mlx5_args(priv, pci_dev->devargs);
+ err = mlx5_args(priv, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
@@ -617,7 +617,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
snprintf(name, sizeof(name), "%s port %u",
ibv_get_device_name(ibv_dev), port);
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_PCI);
+ eth_dev = rte_eth_dev_allocate(name);
}
if (eth_dev == NULL) {
ERROR("can not allocate rte ethdev");
@@ -729,9 +729,11 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
static struct eth_driver mlx5_driver = {
.pci_drv = {
- .name = MLX5_DRIVER_NAME,
+ .driver = {
+ .name = MLX5_DRIVER_NAME
+ },
.id_table = mlx5_pci_id_map,
- .devinit = mlx5_pci_devinit,
+ .probe = mlx5_pci_probe,
.drv_flags = RTE_PCI_DRV_INTR_LSC,
},
.dev_private_size = sizeof(struct priv)
@@ -740,11 +742,10 @@ static struct eth_driver mlx5_driver = {
/**
* Driver initialization routine.
*/
-static int
-rte_mlx5_pmd_init(const char *name, const char *args)
+RTE_INIT(rte_mlx5_pmd_init);
+static void
+rte_mlx5_pmd_init(void)
{
- (void)name;
- (void)args;
/*
* RDMAV_HUGEPAGES_SAFE tells ibv_fork_init() we intend to use
* huge pages. Calling ibv_fork_init() during init allows
@@ -754,13 +755,7 @@ rte_mlx5_pmd_init(const char *name, const char *args)
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
ibv_fork_init();
rte_eal_pci_register(&mlx5_driver.pci_drv);
- return 0;
}
-static struct rte_driver rte_mlx5_driver = {
- .type = PMD_PDEV,
- .init = rte_mlx5_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_mlx5_driver, mlx5);
-DRIVER_REGISTER_PCI_TABLE(mlx5, mlx5_pci_id_map);
+RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index cc2a6f3e..b32816e6 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -37,7 +37,7 @@
#include "mlx5_autoconf.h"
/* Reported driver name. */
-#define MLX5_DRIVER_NAME "librte_pmd_mlx5"
+#define MLX5_DRIVER_NAME "net_mlx5"
/* Maximum number of simultaneous MAC addresses. */
#define MLX5_MAX_MAC_ADDRESSES 128
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index ba1ec2a6..c0f73e93 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -933,7 +933,7 @@ recover:
if (rehash)
ret = rxq_rehash(dev, rxq_ctrl);
else
- ret = rxq_ctrl_setup(dev, rxq_ctrl, rxq->elts_n,
+ ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
rxq_ctrl->socket, NULL, rxq->mp);
if (!ret)
continue;
@@ -1194,7 +1194,7 @@ mlx5_dev_link_status_handler(void *arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/**
@@ -1217,7 +1217,7 @@ mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
/**
@@ -1441,7 +1441,7 @@ mlx5_secondary_data_setup(struct priv *priv)
if (txq_ctrl != NULL) {
if (txq_ctrl_setup(priv->dev,
txq_ctrl,
- primary_txq->elts_n,
+ 1 << primary_txq->elts_n,
primary_txq_ctrl->socket,
NULL) == 0) {
txq_ctrl->txq.stats.idx =
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 1c369cac..7f31a2f7 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -67,8 +67,15 @@
/* Maximum number of packets a multi-packet WQE can handle. */
#define MLX5_MPW_DSEG_MAX 5
-/* Room for inline data in regular work queue element. */
-#define MLX5_WQE64_INL_DATA 12
+/* WQE DWORD size */
+#define MLX5_WQE_DWORD_SIZE 16
+
+/* WQE size */
+#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
+
+/* Compute the number of DS. */
+#define MLX5_WQE_DS(n) \
+ (((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
/* Room for inline data in multi-packet WQE. */
#define MLX5_MWQE64_INL_DATA 28
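A quick standalone check of the MLX5_WQE_DS() helper introduced above: it is plain ceiling division of a byte count into 16-byte units, so the assertions below follow directly from the macro definition (test harness only, not part of the patch).

#include <assert.h>

#define MLX5_WQE_DWORD_SIZE 16
#define MLX5_WQE_DS(n) \
	(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)

int
main(void)
{
	assert(MLX5_WQE_DS(1) == 1);	/* one byte still occupies a unit */
	assert(MLX5_WQE_DS(16) == 1);	/* exact multiple */
	assert(MLX5_WQE_DS(17) == 2);	/* spills into a second unit */
	assert(MLX5_WQE_DS(64) == 4);	/* a full 64-byte WQE is 4 units */
	return 0;
}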
@@ -106,59 +113,26 @@ struct mlx5_wqe_eth_seg_small {
uint16_t mss;
uint32_t rsvd2;
uint16_t inline_hdr_sz;
+ uint8_t inline_hdr[2];
};
-/* Regular WQE. */
-struct mlx5_wqe_regular {
- union {
- struct mlx5_wqe_ctrl_seg ctrl;
- uint32_t data[4];
- } ctrl;
- struct mlx5_wqe_eth_seg eseg;
- struct mlx5_wqe_data_seg dseg;
-} __rte_aligned(64);
-
-/* Inline WQE. */
-struct mlx5_wqe_inl {
- union {
- struct mlx5_wqe_ctrl_seg ctrl;
- uint32_t data[4];
- } ctrl;
- struct mlx5_wqe_eth_seg eseg;
+struct mlx5_wqe_inl_small {
uint32_t byte_cnt;
- uint8_t data[MLX5_WQE64_INL_DATA];
-} __rte_aligned(64);
+ uint8_t raw;
+};
-/* Multi-packet WQE. */
-struct mlx5_wqe_mpw {
- union {
- struct mlx5_wqe_ctrl_seg ctrl;
- uint32_t data[4];
- } ctrl;
+/* Small common part of the WQE. */
+struct mlx5_wqe {
+ uint32_t ctrl[4];
struct mlx5_wqe_eth_seg_small eseg;
- struct mlx5_wqe_data_seg dseg[2];
-} __rte_aligned(64);
+};
-/* Multi-packet WQE with inline. */
-struct mlx5_wqe_mpw_inl {
- union {
- struct mlx5_wqe_ctrl_seg ctrl;
- uint32_t data[4];
- } ctrl;
- struct mlx5_wqe_eth_seg_small eseg;
- uint32_t byte_cnt;
- uint8_t data[MLX5_MWQE64_INL_DATA];
+/* WQE. */
+struct mlx5_wqe64 {
+ struct mlx5_wqe hdr;
+ uint8_t raw[32];
} __rte_aligned(64);
-/* Union of all WQE types. */
-union mlx5_wqe {
- struct mlx5_wqe_regular wqe;
- struct mlx5_wqe_inl inl;
- struct mlx5_wqe_mpw mpw;
- struct mlx5_wqe_mpw_inl mpw_inl;
- uint8_t data[64];
-};
-
/* MPW session status. */
enum mlx5_mpw_state {
MLX5_MPW_STATE_OPENED,
@@ -172,7 +146,7 @@ struct mlx5_mpw {
unsigned int pkts_n;
unsigned int len;
unsigned int total_len;
- volatile union mlx5_wqe *wqe;
+ volatile struct mlx5_wqe *wqe;
union {
volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX];
volatile uint8_t *raw;
@@ -184,7 +158,21 @@ struct mlx5_cqe {
#if (RTE_CACHE_LINE_SIZE == 128)
uint8_t padding[64];
#endif
- struct mlx5_cqe64 cqe64;
+ uint8_t pkt_info;
+ uint8_t rsvd0[11];
+ uint32_t rx_hash_res;
+ uint8_t rx_hash_type;
+ uint8_t rsvd1[11];
+ uint8_t hds_ip_ext;
+ uint8_t l4_hdr_type_etc;
+ uint16_t vlan_info;
+ uint8_t rsvd2[12];
+ uint32_t byte_cnt;
+ uint64_t timestamp;
+ uint8_t rsvd3[4];
+ uint16_t wqe_counter;
+ uint8_t rsvd4;
+ uint8_t op_own;
};
#endif /* RTE_PMD_MLX5_PRM_H_ */
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 7dbe8dd2..28e93d3e 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -723,7 +723,7 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
if (rxq_ctrl->rxq.elts == NULL)
return;
- for (i = 0; (i != rxq_ctrl->rxq.elts_n); ++i) {
+ for (i = 0; (i != (1u << rxq_ctrl->rxq.elts_n)); ++i) {
if ((*rxq_ctrl->rxq.elts)[i] != NULL)
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
@@ -807,7 +807,7 @@ rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
int
rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
{
- unsigned int elts_n = rxq_ctrl->rxq.elts_n;
+ unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
unsigned int i;
struct ibv_exp_wq_attr mod;
int err;
@@ -870,7 +870,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
struct ibv_cq *ibcq = tmpl->cq;
struct mlx5_cq *cq = to_mxxx(cq, cq);
struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
- struct rte_mbuf *(*elts)[tmpl->rxq.elts_n] =
+ struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
@@ -881,7 +881,7 @@ rxq_setup(struct rxq_ctrl *tmpl)
if (elts == NULL)
return ENOMEM;
tmpl->rxq.rq_db = rwq->rq.db;
- tmpl->rxq.cqe_n = ibcq->cqe + 1;
+ tmpl->rxq.cqe_n = log2above(ibcq->cqe);
tmpl->rxq.cq_ci = 0;
tmpl->rxq.rq_ci = 0;
tmpl->rxq.cq_db = cq->dbrec;
@@ -924,8 +924,9 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
.priv = priv,
.socket = socket,
.rxq = {
- .elts_n = desc,
+ .elts_n = log2above(desc),
.mp = mp,
+ .rss_hash = priv->rxqs_n > 1,
},
};
struct ibv_exp_wq_attr mod;
@@ -1153,7 +1154,7 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
}
/* Reuse buffers from original queue if possible. */
if (rxq_ctrl->rxq.elts_n) {
- assert(rxq_ctrl->rxq.elts_n == desc);
+ assert(1 << rxq_ctrl->rxq.elts_n == desc);
assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts);
ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts);
} else
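Throughout this series the queue sizes (elts_n, cqe_n, wqe_n) are now stored as log2 values and expanded with "1 << n" at the point of use, with log2above() rounding a requested descriptor count up to the next power of two. The self-contained sketch below mimics that bookkeeping; the helper is a stand-in and may differ from the driver's own log2above().

#include <assert.h>

/* Stand-in for a log2above()-style helper: smallest exponent e such
 * that (1 << e) >= v. */
static unsigned int
log2_above(unsigned int v)
{
	unsigned int l = 0;
	unsigned int r = 0;

	for (; v > 1; v >>= 1, ++l)
		r |= v & 1;
	return l + r;
}

int
main(void)
{
	unsigned int desc = 512;		/* requested descriptors */
	unsigned int elts_n = log2_above(desc);	/* fits a 4-bit field */

	assert(elts_n == 9);
	assert((1u << elts_n) == desc);		/* count recovered on use */
	assert(log2_above(500) == 9);		/* non power of two rounds up */
	return 0;
}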
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 79f7fa99..9b598014 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -81,10 +81,10 @@
* 0 the first time.
*/
static inline int
-check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
+check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40;
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
int ret = 1;
unsigned int i;
@@ -99,9 +99,9 @@ check_cqe64_seen(volatile struct mlx5_cqe64 *cqe)
#endif /* NDEBUG */
static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
- unsigned int cqes_n, const uint16_t ci)
- __attribute__((always_inline));
+check_cqe(volatile struct mlx5_cqe *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+ __attribute__((always_inline));
/**
* Check whether CQE is valid.
@@ -117,8 +117,8 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
* 0 on success, 1 on failure.
*/
static inline int
-check_cqe64(volatile struct mlx5_cqe64 *cqe,
- unsigned int cqes_n, const uint16_t ci)
+check_cqe(volatile struct mlx5_cqe *cqe,
+ unsigned int cqes_n, const uint16_t ci)
{
uint16_t idx = ci & cqes_n;
uint8_t op_own = cqe->op_own;
@@ -136,14 +136,14 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) ||
(syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR))
return 0;
- if (!check_cqe64_seen(cqe))
+ if (!check_cqe_seen(cqe))
ERROR("unexpected CQE error %u (0x%02x)"
" syndrome 0x%02x",
op_code, op_code, syndrome);
return 1;
} else if ((op_code != MLX5_CQE_RESP_SEND) &&
(op_code != MLX5_CQE_REQ)) {
- if (!check_cqe64_seen(cqe))
+ if (!check_cqe_seen(cqe))
ERROR("unexpected CQE opcode %u (0x%02x)",
op_code, op_code);
return 1;
@@ -152,6 +152,9 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
return 0;
}
+static inline void
+txq_complete(struct txq *txq) __attribute__((always_inline));
+
/**
* Manage TX completions.
*
@@ -160,34 +163,34 @@ check_cqe64(volatile struct mlx5_cqe64 *cqe,
* @param txq
* Pointer to TX queue structure.
*/
-static void
+static inline void
txq_complete(struct txq *txq)
{
- const unsigned int elts_n = txq->elts_n;
- const unsigned int cqe_n = txq->cqe_n;
+ const unsigned int elts_n = 1 << txq->elts_n;
+ const unsigned int cqe_n = 1 << txq->cqe_n;
const unsigned int cqe_cnt = cqe_n - 1;
uint16_t elts_free = txq->elts_tail;
uint16_t elts_tail;
uint16_t cq_ci = txq->cq_ci;
- volatile struct mlx5_cqe64 *cqe = NULL;
- volatile union mlx5_wqe *wqe;
+ volatile struct mlx5_cqe *cqe = NULL;
+ volatile struct mlx5_wqe *wqe;
do {
- volatile struct mlx5_cqe64 *tmp;
+ volatile struct mlx5_cqe *tmp;
- tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64;
- if (check_cqe64(tmp, cqe_n, cq_ci))
+ tmp = &(*txq->cqes)[cq_ci & cqe_cnt];
+ if (check_cqe(tmp, cqe_n, cq_ci))
break;
cqe = tmp;
#ifndef NDEBUG
if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) {
- if (!check_cqe64_seen(cqe))
+ if (!check_cqe_seen(cqe))
ERROR("unexpected compressed CQE, TX stopped");
return;
}
if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) ||
(MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) {
- if (!check_cqe64_seen(cqe))
+ if (!check_cqe_seen(cqe))
ERROR("unexpected error CQE, TX stopped");
return;
}
@@ -196,9 +199,10 @@ txq_complete(struct txq *txq)
} while (1);
if (unlikely(cqe == NULL))
return;
- wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)];
- elts_tail = wqe->wqe.ctrl.data[3];
- assert(elts_tail < txq->wqe_n);
+ wqe = &(*txq->wqes)[htons(cqe->wqe_counter) &
+ ((1 << txq->wqe_n) - 1)].hdr;
+ elts_tail = wqe->ctrl[3];
+ assert(elts_tail < (1 << txq->wqe_n));
/* Free buffers. */
while (elts_free != elts_tail) {
struct rte_mbuf *elt = (*txq->elts)[elts_free];
@@ -284,112 +288,6 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
}
/**
- * Write a regular WQE.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe
- * Pointer to the WQE to fill.
- * @param buf
- * Buffer.
- * @param length
- * Packet length.
- *
- * @return ds
- * Number of DS elements consumed.
- */
-static inline unsigned int
-mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe,
- struct rte_mbuf *buf, uint32_t length)
-{
- uintptr_t raw = (uintptr_t)&wqe->wqe.eseg.inline_hdr_start;
- uint16_t ds;
- uint16_t pkt_inline_sz = 16;
- uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);
- struct mlx5_wqe_data_seg *dseg = NULL;
-
- assert(length >= 16);
- /* Start the know and common part of the WQE structure. */
- wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
- wqe->wqe.ctrl.data[2] = 0;
- wqe->wqe.ctrl.data[3] = 0;
- wqe->wqe.eseg.rsvd0 = 0;
- wqe->wqe.eseg.rsvd1 = 0;
- wqe->wqe.eseg.mss = 0;
- wqe->wqe.eseg.rsvd2 = 0;
- /* Start by copying the Ethernet Header. */
- rte_mov16((uint8_t *)raw, (uint8_t *)addr);
- length -= 16;
- addr += 16;
- /* Replace the Ethernet type by the VLAN if necessary. */
- if (buf->ol_flags & PKT_TX_VLAN_PKT) {
- uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
-
- memcpy((uint8_t *)(raw + 16 - sizeof(vlan)),
- &vlan, sizeof(vlan));
- addr -= sizeof(vlan);
- length += sizeof(vlan);
- }
- /* Inline if enough room. */
- if (txq->max_inline != 0) {
- uintptr_t end = (uintptr_t)&(*txq->wqes)[txq->wqe_n];
- uint16_t max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
- uint16_t room;
-
- raw += 16;
- room = end - (uintptr_t)raw;
- if (room > max_inline) {
- uintptr_t addr_end = (addr + max_inline) &
- ~(RTE_CACHE_LINE_SIZE - 1);
- uint16_t copy_b = ((addr_end - addr) > length) ?
- length :
- (addr_end - addr);
-
- rte_memcpy((void *)raw, (void *)addr, copy_b);
- addr += copy_b;
- length -= copy_b;
- pkt_inline_sz += copy_b;
- /* Sanity check. */
- assert(addr <= addr_end);
- }
- /* Store the inlined packet size in the WQE. */
- wqe->wqe.eseg.inline_hdr_sz = htons(pkt_inline_sz);
- /*
- * 2 DWORDs consumed by the WQE header + 1 DSEG +
- * the size of the inline part of the packet.
- */
- ds = 2 + ((pkt_inline_sz - 2 + 15) / 16);
- if (length > 0) {
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)wqe + (ds * 16));
- if ((uintptr_t)dseg >= end)
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)&(*txq->wqes)[0]);
- goto use_dseg;
- }
- } else {
- /* Add the remaining packet as a simple ds. */
- ds = 3;
- /*
- * No inline has been done in the packet, only the Ethernet
- * Header as been stored.
- */
- wqe->wqe.eseg.inline_hdr_sz = htons(16);
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)wqe + (ds * 16));
-use_dseg:
- *dseg = (struct mlx5_wqe_data_seg) {
- .addr = htonll(addr),
- .byte_count = htonl(length),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
- };
- ++ds;
- }
- wqe->wqe.ctrl.data[1] = htonl(txq->qp_num_8s | ds);
- return ds;
-}
-
-/**
* Ring TX queue doorbell.
*
* @param txq
@@ -409,8 +307,8 @@ mlx5_tx_dbrec(struct txq *txq)
*txq->qp_db = htonl(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
- rte_mov16(dst, (uint8_t *)data);
- txq->bf_offset ^= txq->bf_buf_size;
+ memcpy(dst, (uint8_t *)data, 16);
+ txq->bf_offset ^= (1 << txq->bf_buf_size);
}
/**
@@ -424,9 +322,9 @@ mlx5_tx_dbrec(struct txq *txq)
static inline void
tx_prefetch_cqe(struct txq *txq, uint16_t ci)
{
- volatile struct mlx5_cqe64 *cqe;
+ volatile struct mlx5_cqe *cqe;
- cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64;
+ cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)];
rte_prefetch0(cqe);
}
@@ -441,9 +339,9 @@ tx_prefetch_cqe(struct txq *txq, uint16_t ci)
static inline void
tx_prefetch_wqe(struct txq *txq, uint16_t ci)
{
- volatile union mlx5_wqe *wqe;
+ volatile struct mlx5_wqe64 *wqe;
- wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)];
+ wqe = &(*txq->wqes)[ci & ((1 << txq->wqe_n) - 1)];
rte_prefetch0(wqe);
}
@@ -465,12 +363,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = txq->elts_n;
+ const unsigned int elts_n = 1 << txq->elts_n;
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
unsigned int comp;
- volatile union mlx5_wqe *wqe = NULL;
+ volatile struct mlx5_wqe *wqe = NULL;
+ unsigned int segs_n = 0;
+ struct rte_mbuf *buf = NULL;
+ uint8_t *raw;
if (unlikely(!pkts_n))
return 0;
@@ -484,13 +385,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (max > elts_n)
max -= elts_n;
do {
- struct rte_mbuf *buf = *(pkts++);
- unsigned int elts_head_next;
+ volatile struct mlx5_wqe_data_seg *dseg = NULL;
uint32_t length;
- unsigned int segs_n = buf->nb_segs;
- volatile struct mlx5_wqe_data_seg *dseg;
- unsigned int ds = sizeof(*wqe) / 16;
+ unsigned int ds = 0;
+ uintptr_t addr;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ uint32_t total_length = 0;
+#endif
+ /* first_seg */
+ buf = *(pkts++);
+ segs_n = buf->nb_segs;
/*
* Make sure there is enough room to store this packet and
* that one ring entry remains unused.
@@ -499,73 +404,176 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (max < segs_n + 1)
break;
max -= segs_n;
- --pkts_n;
- elts_head_next = (elts_head + 1) & (elts_n - 1);
- wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
- tx_prefetch_wqe(txq, txq->wqe_ci);
+ --segs_n;
+ if (!segs_n)
+ --pkts_n;
+ wqe = &(*txq->wqes)[txq->wqe_ci &
+ ((1 << txq->wqe_n) - 1)].hdr;
tx_prefetch_wqe(txq, txq->wqe_ci + 1);
- if (pkts_n)
+ if (pkts_n > 1)
rte_prefetch0(*pkts);
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
length = DATA_LEN(buf);
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ total_length = length;
+#endif
+ assert(length >= MLX5_WQE_DWORD_SIZE);
/* Update element. */
(*txq->elts)[elts_head] = buf;
+ elts_head = (elts_head + 1) & (elts_n - 1);
/* Prefetch next buffer data. */
- if (pkts_n)
- rte_prefetch0(rte_pktmbuf_mtod(*pkts,
- volatile void *));
+ if (pkts_n > 1) {
+ volatile void *pkt_addr;
+
+ pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
+ rte_prefetch0(pkt_addr);
+ }
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- wqe->wqe.eseg.cs_flags =
+ wqe->eseg.cs_flags =
MLX5_ETH_WQE_L3_CSUM |
MLX5_ETH_WQE_L4_CSUM;
} else {
- wqe->wqe.eseg.cs_flags = 0;
+ wqe->eseg.cs_flags = 0;
}
- ds = mlx5_wqe_write(txq, wqe, buf, length);
- if (segs_n == 1)
- goto skip_segs;
- dseg = (volatile struct mlx5_wqe_data_seg *)
- (((uintptr_t)wqe) + ds * 16);
- while (--segs_n) {
+ raw = (uint8_t *)(uintptr_t)&wqe->eseg.inline_hdr[0];
+ /* Start the known and common part of the WQE structure. */
+ wqe->ctrl[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+ wqe->ctrl[2] = 0;
+ wqe->ctrl[3] = 0;
+ wqe->eseg.rsvd0 = 0;
+ wqe->eseg.rsvd1 = 0;
+ wqe->eseg.mss = 0;
+ wqe->eseg.rsvd2 = 0;
+ /* Start by copying the Ethernet Header. */
+ memcpy((uint8_t *)raw, ((uint8_t *)addr), 16);
+ length -= MLX5_WQE_DWORD_SIZE;
+ addr += MLX5_WQE_DWORD_SIZE;
+ /* Replace the Ethernet type by the VLAN if necessary. */
+ if (buf->ol_flags & PKT_TX_VLAN_PKT) {
+ uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
+
+ memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE -
+ sizeof(vlan)),
+ &vlan, sizeof(vlan));
+ addr -= sizeof(vlan);
+ length += sizeof(vlan);
+ }
+ /* Inline if enough room. */
+ if (txq->max_inline != 0) {
+ uintptr_t end =
+ (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n];
+ uint16_t max_inline =
+ txq->max_inline * RTE_CACHE_LINE_SIZE;
+ uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
+ uint16_t room;
+
+ raw += MLX5_WQE_DWORD_SIZE;
+ room = end - (uintptr_t)raw;
+ if (room > max_inline) {
+ uintptr_t addr_end = (addr + max_inline) &
+ ~(RTE_CACHE_LINE_SIZE - 1);
+ uint16_t copy_b = ((addr_end - addr) > length) ?
+ length :
+ (addr_end - addr);
+
+ rte_memcpy((void *)raw, (void *)addr, copy_b);
+ addr += copy_b;
+ length -= copy_b;
+ pkt_inline_sz += copy_b;
+ /* Sanity check. */
+ assert(addr <= addr_end);
+ }
+ /* Store the inlined packet size in the WQE. */
+ wqe->eseg.inline_hdr_sz = htons(pkt_inline_sz);
/*
- * Spill on next WQE when the current one does not have
- * enough room left. Size of WQE must a be a multiple
- * of data segment size.
+ * 2 DWORDs consumed by the WQE header + 1 DSEG +
+ * the size of the inline part of the packet.
*/
- assert(!(sizeof(*wqe) % sizeof(*dseg)));
- if (!(ds % (sizeof(*wqe) / 16)))
- dseg = (volatile void *)
- &(*txq->wqes)[txq->wqe_ci++ &
- (txq->wqe_n - 1)];
- else
- ++dseg;
+ ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
+ if (length > 0) {
+ dseg = (struct mlx5_wqe_data_seg *)
+ ((uintptr_t)wqe +
+ (ds * MLX5_WQE_DWORD_SIZE));
+ if ((uintptr_t)dseg >= end)
+ dseg = (struct mlx5_wqe_data_seg *)
+ ((uintptr_t)&(*txq->wqes)[0]);
+ goto use_dseg;
+ } else if (!segs_n) {
+ goto next_pkt;
+ } else {
+ goto next_seg;
+ }
+ } else {
+ /*
+ * No inline has been done in the packet, only the
+ * Ethernet Header has been stored.
+ */
+ wqe->eseg.inline_hdr_sz = htons(MLX5_WQE_DWORD_SIZE);
+ dseg = (struct mlx5_wqe_data_seg *)
+ ((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
+ ds = 3;
+use_dseg:
+ /* Add the remaining packet as a simple ds. */
+ *dseg = (struct mlx5_wqe_data_seg) {
+ .addr = htonll(addr),
+ .byte_count = htonl(length),
+ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ };
++ds;
- buf = buf->next;
- assert(buf);
- /* Store segment information. */
- dseg->byte_count = htonl(DATA_LEN(buf));
- dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf));
- dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
- (*txq->elts)[elts_head_next] = buf;
- elts_head_next = (elts_head_next + 1) & (elts_n - 1);
+ if (!segs_n)
+ goto next_pkt;
+ }
+next_seg:
+ assert(buf);
+ assert(ds);
+ assert(wqe);
+ /*
+ * Spill on next WQE when the current one does not have
+ * enough room left. Size of WQE must be a multiple
+ * of data segment size.
+ */
+ assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
+ if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
+ unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) &
+ ((1 << txq->wqe_n) - 1);
+
+ dseg = (struct mlx5_wqe_data_seg *)
+ ((uintptr_t)&(*txq->wqes)[n]);
+ tx_prefetch_wqe(txq, n + 1);
+ } else {
+ ++dseg;
+ }
+ ++ds;
+ buf = buf->next;
+ assert(buf);
+ length = DATA_LEN(buf);
#ifdef MLX5_PMD_SOFT_COUNTERS
- length += DATA_LEN(buf);
+ total_length += length;
#endif
- ++j;
- }
- /* Update DS field in WQE. */
- wqe->wqe.ctrl.data[1] &= htonl(0xffffffc0);
- wqe->wqe.ctrl.data[1] |= htonl(ds & 0x3f);
-skip_segs:
+ /* Store segment information. */
+ *dseg = (struct mlx5_wqe_data_seg) {
+ .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
+ .byte_count = htonl(length),
+ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ };
+ (*txq->elts)[elts_head] = buf;
+ elts_head = (elts_head + 1) & (elts_n - 1);
+ ++j;
+ --segs_n;
+ if (segs_n)
+ goto next_seg;
+ else
+ --pkts_n;
+next_pkt:
+ ++i;
+ wqe->ctrl[1] = htonl(txq->qp_num_8s | ds);
+ txq->wqe_ci += (ds + 3) / 4;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
- txq->stats.obytes += length;
+ txq->stats.obytes += total_length;
#endif
- /* Increment consumer index. */
- txq->wqe_ci += (ds + 3) / 4;
- elts_head = elts_head_next;
- ++i;
} while (pkts_n);
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
@@ -574,9 +582,9 @@ skip_segs:
comp = txq->elts_comp + i + j;
if (comp >= MLX5_TX_COMP_THRESH) {
/* Request completion on last WQE. */
- wqe->wqe.ctrl.data[2] = htonl(8);
+ wqe->ctrl[2] = htonl(8);
/* Save elts_head in unused "immediate" field of WQE. */
- wqe->wqe.ctrl.data[3] = elts_head;
+ wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
} else {
txq->elts_comp = comp;
@@ -604,28 +612,29 @@ skip_segs:
static inline void
mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
{
- uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
(volatile struct mlx5_wqe_data_seg (*)[])
- (uintptr_t)&(*txq->wqes)[(idx + 1) & (txq->wqe_n - 1)];
+ (uintptr_t)&(*txq->wqes)[(idx + 1) & ((1 << txq->wqe_n) - 1)];
mpw->state = MLX5_MPW_STATE_OPENED;
mpw->pkts_n = 0;
mpw->len = length;
mpw->total_len = 0;
- mpw->wqe = &(*txq->wqes)[idx];
- mpw->wqe->mpw.eseg.mss = htons(length);
- mpw->wqe->mpw.eseg.inline_hdr_sz = 0;
- mpw->wqe->mpw.eseg.rsvd0 = 0;
- mpw->wqe->mpw.eseg.rsvd1 = 0;
- mpw->wqe->mpw.eseg.rsvd2 = 0;
- mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO);
- mpw->wqe->mpw.ctrl.data[2] = 0;
- mpw->wqe->mpw.ctrl.data[3] = 0;
- mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0];
- mpw->data.dseg[1] = &mpw->wqe->mpw.dseg[1];
+ mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+ mpw->wqe->eseg.mss = htons(length);
+ mpw->wqe->eseg.inline_hdr_sz = 0;
+ mpw->wqe->eseg.rsvd0 = 0;
+ mpw->wqe->eseg.rsvd1 = 0;
+ mpw->wqe->eseg.rsvd2 = 0;
+ mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) | MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ mpw->data.dseg[0] = (volatile struct mlx5_wqe_data_seg *)
+ (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
+ mpw->data.dseg[1] = (volatile struct mlx5_wqe_data_seg *)
+ (((uintptr_t)mpw->wqe) + (3 * MLX5_WQE_DWORD_SIZE));
mpw->data.dseg[2] = &(*dseg)[0];
mpw->data.dseg[3] = &(*dseg)[1];
mpw->data.dseg[4] = &(*dseg)[2];
@@ -648,7 +657,7 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->mpw.ctrl.data[1] = htonl(txq->qp_num_8s | (2 + num));
+ mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | (2 + num));
mpw->state = MLX5_MPW_STATE_CLOSED;
if (num < 3)
++txq->wqe_ci;
@@ -676,7 +685,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = txq->elts_n;
+ const unsigned int elts_n = 1 << txq->elts_n;
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
@@ -726,11 +735,11 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if ((mpw.state == MLX5_MPW_STATE_OPENED) &&
((mpw.len != length) ||
(segs_n != 1) ||
- (mpw.wqe->mpw.eseg.cs_flags != cs_flags)))
+ (mpw.wqe->eseg.cs_flags != cs_flags)))
mlx5_mpw_close(txq, &mpw);
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
mlx5_mpw_new(txq, &mpw, length);
- mpw.wqe->mpw.eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.cs_flags = cs_flags;
}
/* Multi-segment packets must be alone in their MPW. */
assert((segs_n == 1) || (mpw.pkts_n == 0));
@@ -776,12 +785,12 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* "j" includes both packets and segments. */
comp = txq->elts_comp + j;
if (comp >= MLX5_TX_COMP_THRESH) {
- volatile union mlx5_wqe *wqe = mpw.wqe;
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->mpw.ctrl.data[2] = htonl(8);
+ wqe->ctrl[2] = htonl(8);
/* Save elts_head in unused "immediate" field of WQE. */
- wqe->mpw.ctrl.data[3] = elts_head;
+ wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
} else {
txq->elts_comp = comp;
@@ -811,25 +820,28 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
static inline void
mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
{
- uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1);
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+ struct mlx5_wqe_inl_small *inl;
mpw->state = MLX5_MPW_INL_STATE_OPENED;
mpw->pkts_n = 0;
mpw->len = length;
mpw->total_len = 0;
- mpw->wqe = &(*txq->wqes)[idx];
- mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
- (txq->wqe_ci << 8) |
- MLX5_OPCODE_TSO);
- mpw->wqe->mpw_inl.ctrl.data[2] = 0;
- mpw->wqe->mpw_inl.ctrl.data[3] = 0;
- mpw->wqe->mpw_inl.eseg.mss = htons(length);
- mpw->wqe->mpw_inl.eseg.inline_hdr_sz = 0;
- mpw->wqe->mpw_inl.eseg.cs_flags = 0;
- mpw->wqe->mpw_inl.eseg.rsvd0 = 0;
- mpw->wqe->mpw_inl.eseg.rsvd1 = 0;
- mpw->wqe->mpw_inl.eseg.rsvd2 = 0;
- mpw->data.raw = &mpw->wqe->mpw_inl.data[0];
+ mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+ mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_TSO);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ mpw->wqe->eseg.mss = htons(length);
+ mpw->wqe->eseg.inline_hdr_sz = 0;
+ mpw->wqe->eseg.cs_flags = 0;
+ mpw->wqe->eseg.rsvd0 = 0;
+ mpw->wqe->eseg.rsvd1 = 0;
+ mpw->wqe->eseg.rsvd2 = 0;
+ inl = (struct mlx5_wqe_inl_small *)
+ (((uintptr_t)mpw->wqe) + 2 * MLX5_WQE_DWORD_SIZE);
+ mpw->data.raw = (uint8_t *)&inl->raw;
}
/**
@@ -844,17 +856,18 @@ static inline void
mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw)
{
unsigned int size;
+ struct mlx5_wqe_inl_small *inl = (struct mlx5_wqe_inl_small *)
+ (((uintptr_t)mpw->wqe) + (2 * MLX5_WQE_DWORD_SIZE));
- size = sizeof(*mpw->wqe) - MLX5_MWQE64_INL_DATA + mpw->total_len;
+ size = MLX5_WQE_SIZE - MLX5_MWQE64_INL_DATA + mpw->total_len;
/*
* Store size in multiple of 16 bytes. Control and Ethernet segments
* count as 2.
*/
- mpw->wqe->mpw_inl.ctrl.data[1] =
- htonl(txq->qp_num_8s | ((size + 15) / 16));
+ mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(size));
mpw->state = MLX5_MPW_STATE_CLOSED;
- mpw->wqe->mpw_inl.byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
- txq->wqe_ci += (size + (sizeof(*mpw->wqe) - 1)) / sizeof(*mpw->wqe);
+ inl->byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG);
+ txq->wqe_ci += (size + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
}
/**
@@ -876,7 +889,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
{
struct txq *txq = (struct txq *)dpdk_txq;
uint16_t elts_head = txq->elts_head;
- const unsigned int elts_n = txq->elts_n;
+ const unsigned int elts_n = 1 << txq->elts_n;
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
@@ -927,13 +940,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (mpw.state == MLX5_MPW_STATE_OPENED) {
if ((mpw.len != length) ||
(segs_n != 1) ||
- (mpw.wqe->mpw.eseg.cs_flags != cs_flags))
+ (mpw.wqe->eseg.cs_flags != cs_flags))
mlx5_mpw_close(txq, &mpw);
} else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) {
if ((mpw.len != length) ||
(segs_n != 1) ||
(length > inline_room) ||
- (mpw.wqe->mpw_inl.eseg.cs_flags != cs_flags)) {
+ (mpw.wqe->eseg.cs_flags != cs_flags)) {
mlx5_mpw_inline_close(txq, &mpw);
inline_room =
txq->max_inline * RTE_CACHE_LINE_SIZE;
@@ -943,10 +956,10 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if ((segs_n != 1) ||
(length > inline_room)) {
mlx5_mpw_new(txq, &mpw, length);
- mpw.wqe->mpw.eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.cs_flags = cs_flags;
} else {
mlx5_mpw_inline_new(txq, &mpw, length);
- mpw.wqe->mpw_inl.eseg.cs_flags = cs_flags;
+ mpw.wqe->eseg.cs_flags = cs_flags;
}
}
/* Multi-segment packets must be alone in their MPW. */
@@ -992,7 +1005,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
addr = rte_pktmbuf_mtod(buf, uintptr_t);
(*txq->elts)[elts_head] = buf;
/* Maximum number of bytes before wrapping. */
- max = ((uintptr_t)&(*txq->wqes)[txq->wqe_n] -
+ max = ((uintptr_t)&(*txq->wqes)[1 << txq->wqe_n] -
(uintptr_t)mpw.data.raw);
if (length > max) {
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
@@ -1011,7 +1024,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
mpw.data.raw += length;
}
if ((uintptr_t)mpw.data.raw ==
- (uintptr_t)&(*txq->wqes)[txq->wqe_n])
+ (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n])
mpw.data.raw =
(volatile void *)&(*txq->wqes)[0];
++mpw.pkts_n;
@@ -1039,12 +1052,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
/* "j" includes both packets and segments. */
comp = txq->elts_comp + j;
if (comp >= MLX5_TX_COMP_THRESH) {
- volatile union mlx5_wqe *wqe = mpw.wqe;
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
/* Request completion on last WQE. */
- wqe->mpw_inl.ctrl.data[2] = htonl(8);
+ wqe->ctrl[2] = htonl(8);
/* Save elts_head in unused "immediate" field of WQE. */
- wqe->mpw_inl.ctrl.data[3] = elts_head;
+ wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
} else {
txq->elts_comp = comp;
@@ -1075,13 +1088,12 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
* Packet type for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
{
uint32_t pkt_type;
uint8_t flags = cqe->l4_hdr_type_etc;
- uint8_t info = cqe->rsvd0[0];
- if (info & MLX5_CQE_RX_TUNNEL_PACKET)
+ if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET)
pkt_type =
TRANSPOSE(flags,
MLX5_CQE_RX_OUTER_IPV4_PACKET,
@@ -1115,14 +1127,16 @@ rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe)
* Pointer to RX queue.
* @param cqe
* CQE to process.
+ * @param[out] rss_hash
+ * Packet RSS Hash result.
*
* @return
* Packet size in bytes (0 if there is none), -1 in case of completion
* with error.
*/
static inline int
-mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
- uint16_t cqe_cnt)
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+ uint16_t cqe_cnt, uint32_t *rss_hash)
{
struct rxq_zip *zip = &rxq->zip;
uint16_t cqe_n = cqe_cnt + 1;
@@ -1132,9 +1146,10 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
if (zip->ai) {
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
- (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64);
+ (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt]);
len = ntohl((*mc)[zip->ai & 7].byte_cnt);
+ *rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
if ((++zip->ai & 7) == 0) {
/*
* Increment consumer index to skip the number of
@@ -1149,7 +1164,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
uint16_t end = zip->cq_ci;
while (idx != end) {
- (*rxq->cqes)[idx & cqe_cnt].cqe64.op_own =
+ (*rxq->cqes)[idx & cqe_cnt].op_own =
MLX5_CQE_INVALIDATE;
++idx;
}
@@ -1161,7 +1176,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
int ret;
int8_t op_own;
- ret = check_cqe64(cqe, cqe_n, rxq->cq_ci);
+ ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
if (unlikely(ret == 1))
return 0;
++rxq->cq_ci;
@@ -1170,7 +1185,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
volatile struct mlx5_mini_cqe8 (*mc)[8] =
(volatile struct mlx5_mini_cqe8 (*)[8])
(uintptr_t)(&(*rxq->cqes)[rxq->cq_ci &
- cqe_cnt].cqe64);
+ cqe_cnt]);
/* Fix endianness. */
zip->cqe_cnt = ntohl(cqe->byte_cnt);
@@ -1189,9 +1204,11 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
/* Get packet size to return. */
len = ntohl((*mc)[0].byte_cnt);
+ *rss_hash = ntohl((*mc)[0].rx_hash_result);
zip->ai = 1;
} else {
len = ntohl(cqe->byte_cnt);
+ *rss_hash = ntohl(cqe->rx_hash_res);
}
/* Error while receiving packet. */
if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR))
@@ -1212,38 +1229,32 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe,
* Offload flags (ol_flags) for struct rte_mbuf.
*/
static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe)
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
- uint8_t info = cqe->rsvd0[0];
if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
(l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
- ol_flags |=
- (!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) *
- PKT_RX_IP_CKSUM_BAD);
+ ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+ MLX5_CQE_L3_OK,
+ PKT_RX_IP_CKSUM_GOOD);
if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
(l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
(l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
(l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
+ ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
+ MLX5_CQE_L4_OK,
+ PKT_RX_L4_CKSUM_GOOD);
+ if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
ol_flags |=
- (!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) *
- PKT_RX_L4_CKSUM_BAD);
- /*
- * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
- * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
- * (its value is 0).
- */
- if ((info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
- ol_flags |=
- TRANSPOSE(~cqe->l4_hdr_type_etc,
+ TRANSPOSE(cqe->l4_hdr_type_etc,
MLX5_CQE_RX_OUTER_IP_CSUM_OK,
- PKT_RX_IP_CKSUM_BAD) |
- TRANSPOSE(~cqe->l4_hdr_type_etc,
+ PKT_RX_IP_CKSUM_GOOD) |
+ TRANSPOSE(cqe->l4_hdr_type_etc,
MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
- PKT_RX_L4_CKSUM_BAD);
+ PKT_RX_L4_CKSUM_GOOD);
return ol_flags;
}
@@ -1264,21 +1275,22 @@ uint16_t
mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
struct rxq *rxq = dpdk_rxq;
- const unsigned int wqe_cnt = rxq->elts_n - 1;
- const unsigned int cqe_cnt = rxq->cqe_n - 1;
+ const unsigned int wqe_cnt = (1 << rxq->elts_n) - 1;
+ const unsigned int cqe_cnt = (1 << rxq->cqe_n) - 1;
const unsigned int sges_n = rxq->sges_n;
struct rte_mbuf *pkt = NULL;
struct rte_mbuf *seg = NULL;
- volatile struct mlx5_cqe64 *cqe =
- &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
+ volatile struct mlx5_cqe *cqe =
+ &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
unsigned int i = 0;
unsigned int rq_ci = rxq->rq_ci << sges_n;
- int len;
+ int len; /* keep its value across iterations. */
while (pkts_n) {
unsigned int idx = rq_ci & wqe_cnt;
volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx];
struct rte_mbuf *rep = (*rxq->elts)[idx];
+ uint32_t rss_hash_res = 0;
if (pkt)
NEXT(seg) = rep;
@@ -1306,9 +1318,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
break;
}
if (!pkt) {
- cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64;
- len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt);
- if (len == 0) {
+ cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
+ len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
+ &rss_hash_res);
+ if (!len) {
rte_mbuf_refcnt_set(rep, 0);
__rte_mbuf_raw_free(rep);
break;
@@ -1325,12 +1338,16 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Update packet information. */
pkt->packet_type = 0;
pkt->ol_flags = 0;
+ if (rxq->rss_hash) {
+ pkt->hash.rss = rss_hash_res;
+ pkt->ol_flags = PKT_RX_RSS_HASH;
+ }
if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
rxq->crc_present) {
if (rxq->csum) {
pkt->packet_type =
rxq_cq_to_pkt_type(cqe);
- pkt->ol_flags =
+ pkt->ol_flags |=
rxq_cq_to_ol_flags(rxq, cqe);
}
if (cqe->l4_hdr_type_etc &
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 05779ef5..5708c2a7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -54,6 +54,7 @@
#endif
#include <rte_mbuf.h>
#include <rte_mempool.h>
+#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -109,16 +110,18 @@ struct rxq {
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+ unsigned int cqe_n:4; /* Log 2 of CQ elements. */
+ unsigned int elts_n:4; /* Log 2 of Mbufs. */
+ unsigned int port_id:8;
+ unsigned int rss_hash:1; /* RSS hash result is enabled. */
+ unsigned int :9; /* Remaining bits. */
+ volatile uint32_t *rq_db;
+ volatile uint32_t *cq_db;
uint16_t rq_ci;
uint16_t cq_ci;
- uint16_t elts_n;
- uint16_t cqe_n; /* Number of CQ elements. */
- uint16_t port_id;
volatile struct mlx5_wqe_data_seg(*wqes)[];
volatile struct mlx5_cqe(*cqes)[];
struct rxq_zip zip; /* Compressed context. */
- volatile uint32_t *rq_db;
- volatile uint32_t *cq_db;
struct rte_mbuf *(*elts)[];
struct rte_mempool *mp;
struct mlx5_rxq_stats stats;
@@ -238,21 +241,22 @@ struct hash_rxq {
};
/* TX queue descriptor. */
+RTE_STD_C11
struct txq {
uint16_t elts_head; /* Current index in (*elts)[]. */
uint16_t elts_tail; /* First element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
- uint16_t elts_n; /* (*elts)[] length. */
uint16_t cq_ci; /* Consumer index for completion queue. */
- uint16_t cqe_n; /* Number of CQ elements. */
uint16_t wqe_ci; /* Consumer index for work queue. */
- uint16_t wqe_n; /* Number of WQ elements. */
+ uint16_t elts_n:4; /* (*elts)[] length (in log2). */
+ uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
+ uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
+ uint16_t bf_buf_size:4; /* Log2 Blueflame size. */
uint16_t bf_offset; /* Blueflame offset. */
- uint16_t bf_buf_size; /* Blueflame size. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
- volatile union mlx5_wqe (*wqes)[]; /* Work queue. */
+ volatile struct mlx5_wqe64 (*wqes)[]; /* Work queue. */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index e4510efe..053665d5 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -81,8 +81,8 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
- for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) {
- volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i];
+ for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
+ volatile struct mlx5_wqe64 *wqe = &(*txq_ctrl->txq.wqes)[i];
memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
}
@@ -101,7 +101,7 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
static void
txq_free_elts(struct txq_ctrl *txq_ctrl)
{
- unsigned int elts_n = txq_ctrl->txq.elts_n;
+ unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
unsigned int elts_head = txq_ctrl->txq.elts_head;
unsigned int elts_tail = txq_ctrl->txq.elts_tail;
struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;
@@ -212,22 +212,22 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
"it should be set to %u", RTE_CACHE_LINE_SIZE);
return EINVAL;
}
- tmpl->txq.cqe_n = ibcq->cqe + 1;
+ tmpl->txq.cqe_n = log2above(ibcq->cqe);
tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
tmpl->txq.wqes =
- (volatile union mlx5_wqe (*)[])
+ (volatile struct mlx5_wqe64 (*)[])
(uintptr_t)qp->gen_data.sqstart;
- tmpl->txq.wqe_n = qp->sq.wqe_cnt;
+ tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
tmpl->txq.bf_reg = qp->gen_data.bf->reg;
tmpl->txq.bf_offset = qp->gen_data.bf->offset;
- tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size;
+ tmpl->txq.bf_buf_size = log2above(qp->gen_data.bf->buf_size);
tmpl->txq.cq_db = cq->dbrec;
tmpl->txq.cqes =
(volatile struct mlx5_cqe (*)[])
(uintptr_t)cq->active_buf->buf;
tmpl->txq.elts =
- (struct rte_mbuf *(*)[tmpl->txq.elts_n])
+ (struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
return 0;
}
@@ -277,7 +277,7 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
}
(void)conf; /* Thresholds configuration (ignored). */
assert(desc > MLX5_TX_COMP_THRESH);
- tmpl.txq.elts_n = desc;
+ tmpl.txq.elts_n = log2above(desc);
/* MRs will be registered in mp2mr[] later. */
attr.rd = (struct ibv_exp_res_domain_init_attr){
.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c
index 93f87308..fbbbb002 100644
--- a/drivers/net/mpipe/mpipe_tilegx.c
+++ b/drivers/net/mpipe/mpipe_tilegx.c
@@ -33,7 +33,7 @@
#include <unistd.h>
#include <rte_eal.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_eal_memconfig.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
@@ -1549,7 +1549,7 @@ mpipe_link_mac(const char *ifname, uint8_t *mac)
}
static int
-rte_pmd_mpipe_devinit(const char *ifname,
+rte_pmd_mpipe_probe(const char *ifname,
const char *params __rte_unused)
{
gxio_mpipe_context_t *context;
@@ -1587,7 +1587,7 @@ rte_pmd_mpipe_devinit(const char *ifname,
return -ENODEV;
}
- eth_dev = rte_eth_dev_allocate(ifname, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(ifname);
if (!eth_dev) {
RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
rte_free(priv);
@@ -1623,18 +1623,18 @@ rte_pmd_mpipe_devinit(const char *ifname,
return 0;
}
-static struct rte_driver pmd_mpipe_xgbe_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_mpipe_devinit,
+static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
+ .probe = rte_pmd_mpipe_probe,
};
-static struct rte_driver pmd_mpipe_gbe_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_mpipe_devinit,
+static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
+ .probe = rte_pmd_mpipe_probe,
};
-PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv, xgbe);
-PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv, gbe);
+RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
+RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
+RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
+RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);
static void __attribute__((constructor, used))
mpipe_init_contexts(void)
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 815296cb..c6b15874 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -214,7 +214,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name,
+ dev->driver->pci_drv.driver.name,
ring_name, dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
@@ -607,18 +607,8 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
- uint32_t *mac_address;
-
nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
-
- /* A MAC address is 8 bytes long */
- mac_address = (uint32_t *)(hw->mac_addr);
-
- nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
- rte_cpu_to_be_32(*mac_address));
- nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
- rte_cpu_to_be_32(*(mac_address + 4)));
}
static void
@@ -627,6 +617,17 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}
+static void nfp_net_read_mac(struct nfp_net_hw *hw)
+{
+ uint32_t tmp;
+
+ tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+ memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+
+ tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+ memcpy(&hw->mac_addr[4], &tmp, 2);
+}
+
static int
nfp_net_start(struct rte_eth_dev *dev)
{
@@ -732,6 +733,11 @@ nfp_net_close(struct rte_eth_dev *dev)
rte_intr_disable(&dev->pci_dev->intr_handle);
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+ nfp_net_dev_interrupt_handler,
+ (void *)dev);
+
/*
* The ixgbe PMD driver disables the pcie master on the
* device. The i40e does not...
@@ -1000,7 +1006,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->driver_name = dev->driver->pci_drv.name;
+ dev_info->driver_name = dev->driver->pci_drv.driver.name;
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1213,7 +1219,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
nfp_net_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
nfp_net_dev_link_status_print(dev);
@@ -2413,8 +2419,11 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
- /* Using random mac addresses for VFs */
- eth_random_addr(&hw->mac_addr[0]);
+ nfp_net_read_mac(hw);
+
+ if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+ /* Using random mac addresses for VFs */
+ eth_random_addr(&hw->mac_addr[0]);
/* Copying mac address to DPDK eth_dev struct */
ether_addr_copy((struct ether_addr *)hw->mac_addr,
@@ -2459,35 +2468,19 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
};
static struct eth_driver rte_nfp_net_pmd = {
- {
- .name = "rte_nfp_net_pmd",
+ .pci_drv = {
.id_table = pci_id_nfp_net_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = nfp_net_init,
.dev_private_size = sizeof(struct nfp_net_adapter),
};
-static int
-nfp_net_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n",
- NFP_NET_PMD_VERSION);
-
- rte_eth_driver_register(&rte_nfp_net_pmd);
- return 0;
-}
-
-static struct rte_driver rte_nfp_net_driver = {
- .type = PMD_PDEV,
- .init = nfp_net_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_nfp_net_driver, nfp);
-DRIVER_REGISTER_PCI_TABLE(nfp, pci_id_nfp_net_map);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
/*
* Local variables:
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 7a248842..836d982a 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -35,7 +35,7 @@
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
@@ -517,7 +517,7 @@ eth_dev_null_create(const char *name,
goto error;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL)
goto error;
@@ -611,7 +611,7 @@ get_packet_copy_arg(const char *key __rte_unused,
}
static int
-rte_pmd_null_devinit(const char *name, const char *params)
+rte_pmd_null_probe(const char *name, const char *params)
{
unsigned numa_node;
unsigned packet_size = default_packet_size;
@@ -663,7 +663,7 @@ free_kvlist:
}
static int
-rte_pmd_null_devuninit(const char *name)
+rte_pmd_null_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
@@ -686,13 +686,13 @@ rte_pmd_null_devuninit(const char *name)
return 0;
}
-static struct rte_driver pmd_null_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_null_devinit,
- .uninit = rte_pmd_null_devuninit,
+static struct rte_vdev_driver pmd_null_drv = {
+ .probe = rte_pmd_null_probe,
+ .remove = rte_pmd_null_remove,
};
-PMD_REGISTER_DRIVER(pmd_null_drv, eth_null);
-DRIVER_REGISTER_PARAM_STRING(eth_null,
+RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
+RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
+RTE_PMD_REGISTER_PARAM_STRING(net_null,
"size=<int> "
"copy=<int>");
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 7b7126bf..0162f446 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* Copyright(c) 2014 6WIND S.A.
* All rights reserved.
*
@@ -33,23 +33,23 @@
*/
#include <time.h>
-#include <rte_mbuf.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_memcpy.h>
-#include <rte_string_fns.h>
-#include <rte_cycles.h>
-#include <rte_kvargs.h>
-#include <rte_dev.h>
#include <net/if.h>
#include <pcap.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_vdev.h>
+
#define RTE_ETH_PCAP_SNAPSHOT_LEN 65535
#define RTE_ETH_PCAP_SNAPLEN ETHER_MAX_JUMBO_FRAME_LEN
#define RTE_ETH_PCAP_PROMISC 1
#define RTE_ETH_PCAP_TIMEOUT -1
+
#define ETH_PCAP_RX_PCAP_ARG "rx_pcap"
#define ETH_PCAP_TX_PCAP_ARG "tx_pcap"
#define ETH_PCAP_RX_IFACE_ARG "rx_iface"
@@ -58,19 +58,25 @@
#define ETH_PCAP_ARG_MAXLEN 64
+#define RTE_PMD_PCAP_MAX_QUEUES 16
+
static char errbuf[PCAP_ERRBUF_SIZE];
static unsigned char tx_pcap_data[RTE_ETH_PCAP_SNAPLEN];
static struct timeval start_time;
static uint64_t start_cycles;
static uint64_t hz;
+struct queue_stat {
+ volatile unsigned long pkts;
+ volatile unsigned long bytes;
+ volatile unsigned long err_pkts;
+};
+
struct pcap_rx_queue {
pcap_t *pcap;
uint8_t in_port;
struct rte_mempool *mb_pool;
- volatile unsigned long rx_pkts;
- volatile unsigned long rx_bytes;
- volatile unsigned long err_pkts;
+ struct queue_stat rx_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
};
@@ -78,36 +84,29 @@ struct pcap_rx_queue {
struct pcap_tx_queue {
pcap_dumper_t *dumper;
pcap_t *pcap;
- volatile unsigned long tx_pkts;
- volatile unsigned long tx_bytes;
- volatile unsigned long err_pkts;
+ struct queue_stat tx_stat;
char name[PATH_MAX];
char type[ETH_PCAP_ARG_MAXLEN];
};
-struct rx_pcaps {
- unsigned num_of_rx;
- pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
- const char *names[RTE_PMD_RING_MAX_RX_RINGS];
- const char *types[RTE_PMD_RING_MAX_RX_RINGS];
-};
-
-struct tx_pcaps {
- unsigned num_of_tx;
- pcap_dumper_t *dumpers[RTE_PMD_RING_MAX_TX_RINGS];
- pcap_t *pcaps[RTE_PMD_RING_MAX_RX_RINGS];
- const char *names[RTE_PMD_RING_MAX_RX_RINGS];
- const char *types[RTE_PMD_RING_MAX_RX_RINGS];
-};
-
struct pmd_internals {
- struct pcap_rx_queue rx_queue[RTE_PMD_RING_MAX_RX_RINGS];
- struct pcap_tx_queue tx_queue[RTE_PMD_RING_MAX_TX_RINGS];
+ struct pcap_rx_queue rx_queue[RTE_PMD_PCAP_MAX_QUEUES];
+ struct pcap_tx_queue tx_queue[RTE_PMD_PCAP_MAX_QUEUES];
int if_index;
int single_iface;
};
-const char *valid_arguments[] = {
+struct pmd_devargs {
+ unsigned int num_of_queue;
+ struct devargs_queue {
+ pcap_dumper_t *dumper;
+ pcap_t *pcap;
+ const char *name;
+ const char *type;
+ } queue[RTE_PMD_PCAP_MAX_QUEUES];
+};
+
+static const char *valid_arguments[] = {
ETH_PCAP_RX_PCAP_ARG,
ETH_PCAP_TX_PCAP_ARG,
ETH_PCAP_RX_IFACE_ARG,
@@ -116,11 +115,10 @@ const char *valid_arguments[] = {
NULL
};
-static int open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper);
-static int open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap);
-static int open_single_iface(const char *iface, pcap_t **pcap);
+static struct ether_addr eth_addr = {
+ .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 }
+};
-static struct ether_addr eth_addr = { .addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 } };
static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
@@ -130,15 +128,12 @@ static struct rte_eth_link pmd_link = {
};
static int
-eth_pcap_rx_jumbo(struct rte_mempool *mb_pool,
- struct rte_mbuf *mbuf,
- const u_char *data,
- uint16_t data_len)
+eth_pcap_rx_jumbo(struct rte_mempool *mb_pool, struct rte_mbuf *mbuf,
+ const u_char *data, uint16_t data_len)
{
- struct rte_mbuf *m = mbuf;
-
/* Copy the first segment. */
uint16_t len = rte_pktmbuf_tailroom(mbuf);
+ struct rte_mbuf *m = mbuf;
rte_memcpy(rte_pktmbuf_append(mbuf, len), data, len);
data_len -= len;
@@ -178,7 +173,7 @@ eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
while (mbuf) {
rte_memcpy(data + data_len, rte_pktmbuf_mtod(mbuf, void *),
- mbuf->data_len);
+ mbuf->data_len);
data_len += mbuf->data_len;
mbuf = mbuf->next;
@@ -186,11 +181,9 @@ eth_pcap_gather_data(unsigned char *data, struct rte_mbuf *mbuf)
}
static uint16_t
-eth_pcap_rx(void *queue,
- struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+eth_pcap_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- unsigned i;
+ unsigned int i;
struct pcap_pkthdr header;
const u_char *packet;
struct rte_mbuf *mbuf;
@@ -210,17 +203,17 @@ eth_pcap_rx(void *queue,
packet = pcap_next(pcap_q->pcap, &header);
if (unlikely(packet == NULL))
break;
- else
- mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
+
+ mbuf = rte_pktmbuf_alloc(pcap_q->mb_pool);
if (unlikely(mbuf == NULL))
break;
/* Now get the space available for data in the mbuf */
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = rte_pktmbuf_data_room_size(pcap_q->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
if (header.caplen <= buf_size) {
- /* pcap packet will fit in the mbuf, go ahead and copy */
+ /* pcap packet will fit in the mbuf, can copy it */
rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
header.caplen);
mbuf->data_len = (uint16_t)header.caplen;
@@ -241,8 +234,9 @@ eth_pcap_rx(void *queue,
num_rx++;
rx_bytes += header.caplen;
}
- pcap_q->rx_pkts += num_rx;
- pcap_q->rx_bytes += rx_bytes;
+ pcap_q->rx_stat.pkts += num_rx;
+ pcap_q->rx_stat.bytes += rx_bytes;
+
return num_rx;
}
@@ -261,11 +255,9 @@ calculate_timestamp(struct timeval *ts) {
* Callback to handle writing packets to a pcap file.
*/
static uint16_t
-eth_pcap_tx_dumper(void *queue,
- struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- unsigned i;
+ unsigned int i;
struct rte_mbuf *mbuf;
struct pcap_tx_queue *dumper_q = queue;
uint16_t num_tx = 0;
@@ -275,7 +267,8 @@ eth_pcap_tx_dumper(void *queue,
if (dumper_q->dumper == NULL || nb_pkts == 0)
return 0;
- /* writes the nb_pkts packets to the previously opened pcap file dumper */
+ /* writes the nb_pkts packets to the previously opened pcap file
+ * dumper */
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
calculate_timestamp(&header.ts);
@@ -292,8 +285,7 @@ eth_pcap_tx_dumper(void *queue,
tx_pcap_data);
} else {
RTE_LOG(ERR, PMD,
- "Dropping PCAP packet. "
- "Size (%d) > max jumbo size (%d).\n",
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n",
mbuf->pkt_len,
ETHER_MAX_JUMBO_FRAME_LEN);
@@ -313,9 +305,10 @@ eth_pcap_tx_dumper(void *queue,
* we flush the pcap dumper within each burst.
*/
pcap_dump_flush(dumper_q->dumper);
- dumper_q->tx_pkts += num_tx;
- dumper_q->tx_bytes += tx_bytes;
- dumper_q->err_pkts += nb_pkts - num_tx;
+ dumper_q->tx_stat.pkts += num_tx;
+ dumper_q->tx_stat.bytes += tx_bytes;
+ dumper_q->tx_stat.err_pkts += nb_pkts - num_tx;
+
return num_tx;
}
@@ -323,11 +316,9 @@ eth_pcap_tx_dumper(void *queue,
* Callback to handle sending packets through a real NIC.
*/
static uint16_t
-eth_pcap_tx(void *queue,
- struct rte_mbuf **bufs,
- uint16_t nb_pkts)
+eth_pcap_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
- unsigned i;
+ unsigned int i;
int ret;
struct rte_mbuf *mbuf;
struct pcap_tx_queue *tx_queue = queue;
@@ -342,18 +333,16 @@ eth_pcap_tx(void *queue,
if (likely(mbuf->nb_segs == 1)) {
ret = pcap_sendpacket(tx_queue->pcap,
- rte_pktmbuf_mtod(mbuf, u_char *),
- mbuf->pkt_len);
+ rte_pktmbuf_mtod(mbuf, u_char *),
+ mbuf->pkt_len);
} else {
if (mbuf->pkt_len <= ETHER_MAX_JUMBO_FRAME_LEN) {
eth_pcap_gather_data(tx_pcap_data, mbuf);
ret = pcap_sendpacket(tx_queue->pcap,
- tx_pcap_data,
- mbuf->pkt_len);
+ tx_pcap_data, mbuf->pkt_len);
} else {
RTE_LOG(ERR, PMD,
- "Dropping PCAP packet. "
- "Size (%d) > max jumbo size (%d).\n",
+ "Dropping PCAP packet. Size (%d) > max jumbo size (%d).\n",
mbuf->pkt_len,
ETHER_MAX_JUMBO_FRAME_LEN);
@@ -369,16 +358,84 @@ eth_pcap_tx(void *queue,
rte_pktmbuf_free(mbuf);
}
- tx_queue->tx_pkts += num_tx;
- tx_queue->tx_bytes += tx_bytes;
- tx_queue->err_pkts += nb_pkts - num_tx;
+ tx_queue->tx_stat.pkts += num_tx;
+ tx_queue->tx_stat.bytes += tx_bytes;
+ tx_queue->tx_stat.err_pkts += nb_pkts - num_tx;
+
return num_tx;
}
+/*
+ * pcap_open_live wrapper function
+ */
+static inline int
+open_iface_live(const char *iface, pcap_t **pcap) {
+ *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
+ RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
+
+ if (*pcap == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+open_single_iface(const char *iface, pcap_t **pcap)
+{
+ if (open_iface_live(iface, pcap) < 0) {
+ RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
+{
+ pcap_t *tx_pcap;
+
+ /*
+ * We need to create a dummy empty pcap_t to use it
+	 * with pcap_dump_open(). We create a large enough Ethernet
+	 * pcap holder.
+ */
+ tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN);
+ if (tx_pcap == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
+ return -1;
+ }
+
+ /* The dumper is created using the previous pcap_t reference */
+ *dumper = pcap_dump_open(tx_pcap, pcap_filename);
+ if (*dumper == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n",
+ pcap_filename);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
+{
+ *pcap = pcap_open_offline(pcap_filename, errbuf);
+ if (*pcap == NULL) {
+ RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename,
+ errbuf);
+ return -1;
+ }
+
+ return 0;
+}
+
static int
eth_dev_start(struct rte_eth_dev *dev)
{
- unsigned i;
+ unsigned int i;
struct pmd_internals *internals = dev->data->dev_private;
struct pcap_tx_queue *tx;
struct pcap_rx_queue *rx;
@@ -400,12 +457,12 @@ eth_dev_start(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
tx = &internals->tx_queue[i];
- if (!tx->dumper && strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
+ if (!tx->dumper &&
+ strcmp(tx->type, ETH_PCAP_TX_PCAP_ARG) == 0) {
if (open_single_tx_pcap(tx->name, &tx->dumper) < 0)
return -1;
- }
-
- else if (!tx->pcap && strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
+ } else if (!tx->pcap &&
+ strcmp(tx->type, ETH_PCAP_TX_IFACE_ARG) == 0) {
if (open_single_iface(tx->name, &tx->pcap) < 0)
return -1;
}
@@ -421,17 +478,15 @@ eth_dev_start(struct rte_eth_dev *dev)
if (strcmp(rx->type, ETH_PCAP_RX_PCAP_ARG) == 0) {
if (open_single_rx_pcap(rx->name, &rx->pcap) < 0)
return -1;
- }
-
- else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
+ } else if (strcmp(rx->type, ETH_PCAP_RX_IFACE_ARG) == 0) {
if (open_single_iface(rx->name, &rx->pcap) < 0)
return -1;
}
}
status_up:
-
dev->data->dev_link.link_status = ETH_LINK_UP;
+
return 0;
}
@@ -443,7 +498,7 @@ status_up:
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
- unsigned i;
+ unsigned int i;
struct pmd_internals *internals = dev->data->dev_private;
struct pcap_tx_queue *tx;
struct pcap_rx_queue *rx;
@@ -496,6 +551,7 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
+
dev_info->driver_name = drivername;
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
@@ -507,10 +563,9 @@ eth_dev_info(struct rte_eth_dev *dev,
}
static void
-eth_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *igb_stats)
+eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- unsigned i;
+ unsigned int i;
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
unsigned long tx_packets_total = 0, tx_bytes_total = 0;
unsigned long tx_packets_err_total = 0;
@@ -518,42 +573,44 @@ eth_stats_get(struct rte_eth_dev *dev,
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < dev->data->nb_rx_queues; i++) {
- igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
- igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
- rx_packets_total += igb_stats->q_ipackets[i];
- rx_bytes_total += igb_stats->q_ibytes[i];
+ stats->q_ipackets[i] = internal->rx_queue[i].rx_stat.pkts;
+ stats->q_ibytes[i] = internal->rx_queue[i].rx_stat.bytes;
+ rx_packets_total += stats->q_ipackets[i];
+ rx_bytes_total += stats->q_ibytes[i];
}
for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
i < dev->data->nb_tx_queues; i++) {
- igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
- igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
- igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
- tx_packets_total += igb_stats->q_opackets[i];
- tx_bytes_total += igb_stats->q_obytes[i];
- tx_packets_err_total += igb_stats->q_errors[i];
+ stats->q_opackets[i] = internal->tx_queue[i].tx_stat.pkts;
+ stats->q_obytes[i] = internal->tx_queue[i].tx_stat.bytes;
+ stats->q_errors[i] = internal->tx_queue[i].tx_stat.err_pkts;
+ tx_packets_total += stats->q_opackets[i];
+ tx_bytes_total += stats->q_obytes[i];
+ tx_packets_err_total += stats->q_errors[i];
}
- igb_stats->ipackets = rx_packets_total;
- igb_stats->ibytes = rx_bytes_total;
- igb_stats->opackets = tx_packets_total;
- igb_stats->obytes = tx_bytes_total;
- igb_stats->oerrors = tx_packets_err_total;
+ stats->ipackets = rx_packets_total;
+ stats->ibytes = rx_bytes_total;
+ stats->opackets = tx_packets_total;
+ stats->obytes = tx_bytes_total;
+ stats->oerrors = tx_packets_err_total;
}
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
- unsigned i;
+ unsigned int i;
struct pmd_internals *internal = dev->data->dev_private;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- internal->rx_queue[i].rx_pkts = 0;
- internal->rx_queue[i].rx_bytes = 0;
+ internal->rx_queue[i].rx_stat.pkts = 0;
+ internal->rx_queue[i].rx_stat.bytes = 0;
}
+
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- internal->tx_queue[i].tx_pkts = 0;
- internal->tx_queue[i].tx_bytes = 0;
- internal->tx_queue[i].err_pkts = 0;
+ internal->tx_queue[i].tx_stat.pkts = 0;
+ internal->tx_queue[i].tx_stat.bytes = 0;
+ internal->tx_queue[i].tx_stat.err_pkts = 0;
}
}
@@ -584,9 +641,11 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
struct pcap_rx_queue *pcap_q = &internals->rx_queue[rx_queue_id];
+
pcap_q->mb_pool = mb_pool;
dev->data->rx_queues[rx_queue_id] = pcap_q;
pcap_q->in_port = dev->data->port_id;
+
return 0;
}
@@ -597,15 +656,16 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
unsigned int socket_id __rte_unused,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
-
struct pmd_internals *internals = dev->data->dev_private;
+
dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
+
return 0;
}
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
- .dev_stop = eth_dev_stop,
+ .dev_stop = eth_dev_stop,
.dev_close = eth_dev_close,
.dev_configure = eth_dev_configure,
.dev_infos_get = eth_dev_info,
@@ -625,33 +685,23 @@ static const struct eth_dev_ops ops = {
static int
open_rx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned i;
+ unsigned int i;
const char *pcap_filename = value;
- struct rx_pcaps *pcaps = extra_args;
+ struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < pcaps->num_of_rx; i++) {
+ for (i = 0; i < rx->num_of_queue; i++) {
if (open_single_rx_pcap(pcap_filename, &pcap) < 0)
return -1;
- pcaps->pcaps[i] = pcap;
- pcaps->names[i] = pcap_filename;
- pcaps->types[i] = key;
+ rx->queue[i].pcap = pcap;
+ rx->queue[i].name = pcap_filename;
+ rx->queue[i].type = key;
}
return 0;
}
-static int
-open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
-{
- if ((*pcap = pcap_open_offline(pcap_filename, errbuf)) == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", pcap_filename, errbuf);
- return -1;
- }
- return 0;
-}
-
/*
* Opens a pcap file for writing and stores a reference to it
 * for use later on.
@@ -659,79 +709,39 @@ open_single_rx_pcap(const char *pcap_filename, pcap_t **pcap)
static int
open_tx_pcap(const char *key, const char *value, void *extra_args)
{
- unsigned i;
+ unsigned int i;
const char *pcap_filename = value;
- struct tx_pcaps *dumpers = extra_args;
+ struct pmd_devargs *dumpers = extra_args;
pcap_dumper_t *dumper;
- for (i = 0; i < dumpers->num_of_tx; i++) {
+ for (i = 0; i < dumpers->num_of_queue; i++) {
if (open_single_tx_pcap(pcap_filename, &dumper) < 0)
return -1;
- dumpers->dumpers[i] = dumper;
- dumpers->names[i] = pcap_filename;
- dumpers->types[i] = key;
- }
-
- return 0;
-}
-
-static int
-open_single_tx_pcap(const char *pcap_filename, pcap_dumper_t **dumper)
-{
- pcap_t *tx_pcap;
- /*
- * We need to create a dummy empty pcap_t to use it
- * with pcap_dump_open(). We create big enough an Ethernet
- * pcap holder.
- */
-
- if ((tx_pcap = pcap_open_dead(DLT_EN10MB, RTE_ETH_PCAP_SNAPSHOT_LEN))
- == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't create dead pcap\n");
- return -1;
- }
-
- /* The dumper is created using the previous pcap_t reference */
- if ((*dumper = pcap_dump_open(tx_pcap, pcap_filename)) == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s for writing.\n", pcap_filename);
- return -1;
+ dumpers->queue[i].dumper = dumper;
+ dumpers->queue[i].name = pcap_filename;
+ dumpers->queue[i].type = key;
}
return 0;
}
/*
- * pcap_open_live wrapper function
- */
-static inline int
-open_iface_live(const char *iface, pcap_t **pcap) {
- *pcap = pcap_open_live(iface, RTE_ETH_PCAP_SNAPLEN,
- RTE_ETH_PCAP_PROMISC, RTE_ETH_PCAP_TIMEOUT, errbuf);
-
- if (*pcap == NULL) {
- RTE_LOG(ERR, PMD, "Couldn't open %s: %s\n", iface, errbuf);
- return -1;
- }
- return 0;
-}
-
-/*
* Opens an interface for reading and writing
*/
static inline int
open_rx_tx_iface(const char *key, const char *value, void *extra_args)
{
const char *iface = value;
- struct rx_pcaps *pcaps = extra_args;
+ struct pmd_devargs *tx = extra_args;
pcap_t *pcap = NULL;
if (open_single_iface(iface, &pcap) < 0)
return -1;
- pcaps->pcaps[0] = pcap;
- pcaps->names[0] = iface;
- pcaps->types[0] = key;
+ tx->queue[0].pcap = pcap;
+ tx->queue[0].name = iface;
+ tx->queue[0].type = key;
return 0;
}
@@ -742,17 +752,17 @@ open_rx_tx_iface(const char *key, const char *value, void *extra_args)
static inline int
open_rx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned i;
+ unsigned int i;
const char *iface = value;
- struct rx_pcaps *pcaps = extra_args;
+ struct pmd_devargs *rx = extra_args;
pcap_t *pcap = NULL;
- for (i = 0; i < pcaps->num_of_rx; i++) {
+ for (i = 0; i < rx->num_of_queue; i++) {
if (open_single_iface(iface, &pcap) < 0)
return -1;
- pcaps->pcaps[i] = pcap;
- pcaps->names[i] = iface;
- pcaps->types[i] = key;
+ rx->queue[i].pcap = pcap;
+ rx->queue[i].name = iface;
+ rx->queue[i].type = key;
}
return 0;
@@ -764,53 +774,33 @@ open_rx_iface(const char *key, const char *value, void *extra_args)
static int
open_tx_iface(const char *key, const char *value, void *extra_args)
{
- unsigned i;
+ unsigned int i;
const char *iface = value;
- struct tx_pcaps *pcaps = extra_args;
+ struct pmd_devargs *tx = extra_args;
pcap_t *pcap;
- for (i = 0; i < pcaps->num_of_tx; i++) {
+ for (i = 0; i < tx->num_of_queue; i++) {
if (open_single_iface(iface, &pcap) < 0)
return -1;
- pcaps->pcaps[i] = pcap;
- pcaps->names[i] = iface;
- pcaps->types[i] = key;
+ tx->queue[i].pcap = pcap;
+ tx->queue[i].name = iface;
+ tx->queue[i].type = key;
}
return 0;
}
static int
-open_single_iface(const char *iface, pcap_t **pcap)
-{
- if (open_iface_live(iface, pcap) < 0) {
- RTE_LOG(ERR, PMD, "Couldn't open interface %s\n", iface);
- return -1;
- }
-
- return 0;
-}
-
-static int
-rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
+pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
+ const unsigned int nb_tx_queues,
struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev,
- struct rte_kvargs *kvlist)
+ struct rte_eth_dev **eth_dev)
{
struct rte_eth_dev_data *data = NULL;
- unsigned k_idx;
- struct rte_kvargs_pair *pair = NULL;
+ unsigned int numa_node = rte_socket_id();
- for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
- pair = &kvlist->pairs[k_idx];
- if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
- break;
- }
-
- RTE_LOG(INFO, PMD,
- "Creating pcap-backed ethdev on numa socket %u\n", numa_node);
+ RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
+ numa_node);
/* now do all data allocation - for eth_dev structure
* and internal (private) data
@@ -819,33 +809,22 @@ rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
if (data == NULL)
goto error;
- *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
+ *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
+ numa_node);
if (*internals == NULL)
goto error;
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ *eth_dev = rte_eth_dev_allocate(name);
if (*eth_dev == NULL)
goto error;
- /* check length of device name */
- if ((strlen((*eth_dev)->data->name) + 1) > sizeof(data->name))
- goto error;
-
/* now put it all together
* - store queue data in internals,
* - store numa_node info in eth_dev
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
- /* NOTE: we'll replace the data element, of originally allocated eth_dev
- * so the rings are local per-process */
-
- if (pair == NULL)
- (*internals)->if_index = 0;
- else
- (*internals)->if_index = if_nametoindex(pair->value);
-
data->dev_private = *internals;
data->port_id = (*eth_dev)->data->port_id;
snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
@@ -853,9 +832,11 @@ rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
data->mac_addrs = &eth_addr;
- strncpy(data->name,
- (*eth_dev)->data->name, strlen((*eth_dev)->data->name));
+ /*
+	 * NOTE: we'll replace the data element of the originally allocated
+	 * eth_dev so the rings are local per-process
+ */
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
(*eth_dev)->driver = NULL;
@@ -874,13 +855,14 @@ error:
}
static int
-rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues, struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues, const unsigned numa_node,
- struct rte_kvargs *kvlist, struct pmd_internals **internals,
- struct rte_eth_dev **eth_dev)
+eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
+ const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues,
+ const unsigned int nb_tx_queues, struct rte_kvargs *kvlist,
+ struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
{
- unsigned i;
+ struct rte_kvargs_pair *pair = NULL;
+ unsigned int k_idx;
+ unsigned int i;
/* do some parameter checking */
if (rx_queues == NULL && nb_rx_queues > 0)
@@ -888,105 +870,84 @@ rte_eth_from_pcaps_common(const char *name, struct rx_pcaps *rx_queues,
if (tx_queues == NULL && nb_tx_queues > 0)
return -1;
- if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
- internals, eth_dev, kvlist) < 0)
+ if (pmd_init_internals(name, nb_rx_queues, nb_tx_queues, internals,
+ eth_dev) < 0)
return -1;
for (i = 0; i < nb_rx_queues; i++) {
- (*internals)->rx_queue[i].pcap = rx_queues->pcaps[i];
- snprintf((*internals)->rx_queue[i].name,
- sizeof((*internals)->rx_queue[i].name), "%s",
- rx_queues->names[i]);
- snprintf((*internals)->rx_queue[i].type,
- sizeof((*internals)->rx_queue[i].type), "%s",
- rx_queues->types[i]);
- }
- for (i = 0; i < nb_tx_queues; i++) {
- (*internals)->tx_queue[i].dumper = tx_queues->dumpers[i];
- snprintf((*internals)->tx_queue[i].name,
- sizeof((*internals)->tx_queue[i].name), "%s",
- tx_queues->names[i]);
- snprintf((*internals)->tx_queue[i].type,
- sizeof((*internals)->tx_queue[i].type), "%s",
- tx_queues->types[i]);
- }
+ struct pcap_rx_queue *rx = &(*internals)->rx_queue[i];
+ struct devargs_queue *queue = &rx_queues->queue[i];
- return 0;
-}
-
-static int
-rte_eth_from_pcaps_n_dumpers(const char *name,
- struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues,
- struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist)
-{
- struct pmd_internals *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- int ret;
+ rx->pcap = queue->pcap;
+ snprintf(rx->name, sizeof(rx->name), "%s", queue->name);
+ snprintf(rx->type, sizeof(rx->type), "%s", queue->type);
+ }
- ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, numa_node, kvlist,
- &internals, &eth_dev);
+ for (i = 0; i < nb_tx_queues; i++) {
+ struct pcap_tx_queue *tx = &(*internals)->tx_queue[i];
+ struct devargs_queue *queue = &tx_queues->queue[i];
- if (ret < 0)
- return ret;
+ tx->dumper = queue->dumper;
+ tx->pcap = queue->pcap;
+ snprintf(tx->name, sizeof(tx->name), "%s", queue->name);
+ snprintf(tx->type, sizeof(tx->type), "%s", queue->type);
+ }
- /* using multiple pcaps/interfaces */
- internals->single_iface = 0;
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ if (strstr(pair->key, ETH_PCAP_IFACE_ARG) != NULL)
+ break;
+ }
- eth_dev->rx_pkt_burst = eth_pcap_rx;
- eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+ if (pair == NULL)
+ (*internals)->if_index = 0;
+ else
+ (*internals)->if_index = if_nametoindex(pair->value);
return 0;
}
static int
-rte_eth_from_pcaps(const char *name,
- struct rx_pcaps *rx_queues,
- const unsigned nb_rx_queues,
- struct tx_pcaps *tx_queues,
- const unsigned nb_tx_queues,
- const unsigned numa_node,
- struct rte_kvargs *kvlist,
- int single_iface)
+eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
+ const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues,
+ const unsigned int nb_tx_queues, struct rte_kvargs *kvlist,
+ int single_iface, unsigned int using_dumpers)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
int ret;
- ret = rte_eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
- tx_queues, nb_tx_queues, numa_node, kvlist,
- &internals, &eth_dev);
+ ret = eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
if (ret < 0)
return ret;
- /* store wether we are using a single interface for rx/tx or not */
+	/* store whether we are using a single interface for rx/tx or not */
internals->single_iface = single_iface;
eth_dev->rx_pkt_burst = eth_pcap_rx;
- eth_dev->tx_pkt_burst = eth_pcap_tx;
+
+ if (using_dumpers)
+ eth_dev->tx_pkt_burst = eth_pcap_tx_dumper;
+ else
+ eth_dev->tx_pkt_burst = eth_pcap_tx;
return 0;
}
-
static int
-rte_pmd_pcap_devinit(const char *name, const char *params)
+pmd_pcap_probe(const char *name, const char *params)
{
- unsigned numa_node, using_dumpers = 0;
- int ret;
+ unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
struct rte_kvargs *kvlist;
- struct rx_pcaps pcaps = {0};
- struct tx_pcaps dumpers = {0};
+ struct pmd_devargs pcaps = {0};
+ struct pmd_devargs dumpers = {0};
+ int single_iface = 0;
+ int ret;
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
- numa_node = rte_socket_id();
-
gettimeofday(&start_time, NULL);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
@@ -1003,29 +964,39 @@ rte_pmd_pcap_devinit(const char *name, const char *params)
ret = rte_kvargs_process(kvlist, ETH_PCAP_IFACE_ARG,
&open_rx_tx_iface, &pcaps);
+
if (ret < 0)
goto free_kvlist;
- dumpers.pcaps[0] = pcaps.pcaps[0];
- dumpers.names[0] = pcaps.names[0];
- dumpers.types[0] = pcaps.types[0];
- ret = rte_eth_from_pcaps(name, &pcaps, 1, &dumpers, 1,
- numa_node, kvlist, 1);
- goto free_kvlist;
+
+ dumpers.queue[0] = pcaps.queue[0];
+
+ single_iface = 1;
+ pcaps.num_of_queue = 1;
+ dumpers.num_of_queue = 1;
+
+ goto create_eth;
}
/*
* We check whether we want to open a RX stream from a real NIC or a
* pcap file
*/
- if ((pcaps.num_of_rx = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG))) {
+ pcaps.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_RX_PCAP_ARG);
+ if (pcaps.num_of_queue)
+ is_rx_pcap = 1;
+ else
+ pcaps.num_of_queue = rte_kvargs_count(kvlist,
+ ETH_PCAP_RX_IFACE_ARG);
+
+ if (pcaps.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+ pcaps.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
+ if (is_rx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_PCAP_ARG,
&open_rx_pcap, &pcaps);
- } else {
- pcaps.num_of_rx = rte_kvargs_count(kvlist,
- ETH_PCAP_RX_IFACE_ARG);
+ else
ret = rte_kvargs_process(kvlist, ETH_PCAP_RX_IFACE_ARG,
&open_rx_iface, &pcaps);
- }
if (ret < 0)
goto free_kvlist;
@@ -1034,35 +1005,38 @@ rte_pmd_pcap_devinit(const char *name, const char *params)
* We check whether we want to open a TX stream to a real NIC or a
* pcap file
*/
- if ((dumpers.num_of_tx = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_PCAP_ARG))) {
+ dumpers.num_of_queue = rte_kvargs_count(kvlist, ETH_PCAP_TX_PCAP_ARG);
+ if (dumpers.num_of_queue)
+ is_tx_pcap = 1;
+ else
+ dumpers.num_of_queue = rte_kvargs_count(kvlist,
+ ETH_PCAP_TX_IFACE_ARG);
+
+ if (dumpers.num_of_queue > RTE_PMD_PCAP_MAX_QUEUES)
+ dumpers.num_of_queue = RTE_PMD_PCAP_MAX_QUEUES;
+
+ if (is_tx_pcap)
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_PCAP_ARG,
&open_tx_pcap, &dumpers);
- using_dumpers = 1;
- } else {
- dumpers.num_of_tx = rte_kvargs_count(kvlist,
- ETH_PCAP_TX_IFACE_ARG);
+ else
ret = rte_kvargs_process(kvlist, ETH_PCAP_TX_IFACE_ARG,
&open_tx_iface, &dumpers);
- }
if (ret < 0)
goto free_kvlist;
- if (using_dumpers)
- ret = rte_eth_from_pcaps_n_dumpers(name, &pcaps, pcaps.num_of_rx,
- &dumpers, dumpers.num_of_tx, numa_node, kvlist);
- else
- ret = rte_eth_from_pcaps(name, &pcaps, pcaps.num_of_rx, &dumpers,
- dumpers.num_of_tx, numa_node, kvlist, 0);
+create_eth:
+ ret = eth_from_pcaps(name, &pcaps, pcaps.num_of_queue, &dumpers,
+ dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
free_kvlist:
rte_kvargs_free(kvlist);
+
return ret;
}
static int
-rte_pmd_pcap_devuninit(const char *name)
+pmd_pcap_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
@@ -1085,16 +1059,16 @@ rte_pmd_pcap_devuninit(const char *name)
return 0;
}
-static struct rte_driver pmd_pcap_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_pcap_devinit,
- .uninit = rte_pmd_pcap_devuninit,
+static struct rte_vdev_driver pmd_pcap_drv = {
+ .probe = pmd_pcap_probe,
+ .remove = pmd_pcap_remove,
};
-PMD_REGISTER_DRIVER(pmd_pcap_drv, eth_pcap);
-DRIVER_REGISTER_PARAM_STRING(eth_pcap,
- "rx_pcap=<string> "
- "tx_pcap=<string> "
- "rx_iface=<ifc> "
- "tx_iface=<ifc> "
- "iface=<ifc>");
+RTE_PMD_REGISTER_VDEV(net_pcap, pmd_pcap_drv);
+RTE_PMD_REGISTER_ALIAS(net_pcap, eth_pcap);
+RTE_PMD_REGISTER_PARAM_STRING(net_pcap,
+ ETH_PCAP_RX_PCAP_ARG "=<string> "
+ ETH_PCAP_TX_PCAP_ARG "=<string> "
+ ETH_PCAP_RX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_TX_IFACE_ARG "=<ifc> "
+ ETH_PCAP_IFACE_ARG "=<ifc>");
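The tx_pcap path above leans on a libpcap detail spelled out in open_single_tx_pcap(): pcap_dump_open() requires a pcap_t handle even when only writing to a file, so a "dead" Ethernet handle is created first with pcap_open_dead(). A standalone sketch of that idiom against the stock libpcap API, independent of DPDK; "out.pcap" and the zero-filled frame are only examples.

#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <pcap.h>

int
main(void)
{
	pcap_t *dead;
	pcap_dumper_t *dumper;
	struct pcap_pkthdr hdr;
	unsigned char frame[60];

	/* dummy pcap_t: Ethernet link type, 64 KiB snapshot length */
	dead = pcap_open_dead(DLT_EN10MB, 65535);
	if (dead == NULL)
		return 1;

	/* the dumper is opened against the dead handle */
	dumper = pcap_dump_open(dead, "out.pcap");
	if (dumper == NULL) {
		fprintf(stderr, "%s\n", pcap_geterr(dead));
		pcap_close(dead);
		return 1;
	}

	/* write one all-zero 60-byte frame with the current timestamp */
	memset(frame, 0, sizeof(frame));
	memset(&hdr, 0, sizeof(hdr));
	gettimeofday(&hdr.ts, NULL);
	hdr.caplen = sizeof(frame);
	hdr.len = sizeof(frame);
	pcap_dump((unsigned char *)dumper, &hdr, frame);

	pcap_dump_close(dumper);
	pcap_close(dead);
	return 0;
}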
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 7965a831..29b443df 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -14,8 +14,6 @@ LIB = librte_pmd_qede.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-LDLIBS += -lz
-
EXPORT_MAP := rte_pmd_qede_version.map
LIBABIVER := 1
@@ -48,11 +46,11 @@ endif
endif
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
-ifeq ($(shell gcc -Wno-unused-but-set-variable -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
endif
CFLAGS_BASE_DRIVER += -Wno-missing-declarations
-ifeq ($(shell gcc -Wno-maybe-uninitialized -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0)
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized
endif
CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 16029b58..28be9587 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -6,8 +6,6 @@
* See LICENSE.qede_pmd for copyright and licensing details.
*/
-#include <zlib.h>
-
#include <rte_memzone.h>
#include <rte_errno.h>
@@ -65,6 +63,27 @@ inline bool qede_test_bit(u32 nr, unsigned long *addr)
return res;
}
+static inline u32 qede_ffb(unsigned long word)
+{
+ unsigned long first_bit;
+
+ first_bit = __builtin_ffsl(word);
+ return first_bit ? (first_bit - 1) : OSAL_BITS_PER_UL;
+}
+
+inline u32 qede_find_first_bit(unsigned long *addr, u32 limit)
+{
+ u32 i;
+ u32 nwords = 0;
+ OSAL_BUILD_BUG_ON(!limit);
+ nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
+ for (i = 0; i < nwords; i++)
+ if (addr[i] != 0)
+ break;
+
+ return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffb(addr[i]);
+}
+
static inline u32 qede_ffz(unsigned long word)
{
unsigned long first_zero;
@@ -152,6 +171,7 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
return mz->addr;
}
+#ifdef CONFIG_ECORE_ZIPPED_FW
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
@@ -182,6 +202,7 @@ u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
return p_hwfn->stream->total_out / 4;
}
+#endif
void
qede_get_mcp_proto_stats(struct ecore_dev *edev,
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 3e2aeb03..0b446f2e 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -41,6 +41,8 @@ void qed_link_update(struct ecore_hwfn *hwfn);
#endif
#endif
+#define OSAL_WARN(arg1, arg2, arg3, ...) (0)
+
/* Memory Types */
typedef uint8_t u8;
typedef uint16_t u16;
@@ -301,6 +303,10 @@ bool qede_test_bit(u32, unsigned long *);
#define OSAL_TEST_BIT(bit, bitmap) \
qede_test_bit(bit, bitmap)
+u32 qede_find_first_bit(unsigned long *, u32);
+#define OSAL_FIND_FIRST_BIT(bitmap, length) \
+ qede_find_first_bit(bitmap, length)
+
u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
qede_find_first_zero_bit(bitmap, length)
@@ -324,6 +330,8 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
#define OSAL_IOV_GET_OS_TYPE() 0
+#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) 0
+#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) 0
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
@@ -377,6 +385,8 @@ u32 qede_osal_log2(u32);
#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr)
#define OSAL_SPRINTF(name, pattern, ...) \
sprintf(name, pattern, ##__VA_ARGS__)
+#define OSAL_SNPRINTF(buf, size, format, ...) \
+ snprintf(buf, size, format, ##__VA_ARGS__)
#define OSAL_STRLEN(string) strlen(string)
#define OSAL_STRCPY(dst, string) strcpy(dst, string)
#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
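The OSAL_FIND_FIRST_BIT macro added above maps onto the qede_find_first_bit() helper from bcm_osal.c, which scans word by word and uses __builtin_ffsl() on the first non-zero word. A self-contained sketch of the same logic; first_set_bit and BITS_PER_UL are illustrative names, not driver symbols.

#include <stdio.h>

#define BITS_PER_UL	(8 * sizeof(unsigned long))

static unsigned int
first_set_bit(const unsigned long *addr, unsigned int limit)
{
	unsigned int nwords = (limit - 1) / BITS_PER_UL + 1;
	unsigned int i;

	for (i = 0; i < nwords; i++)
		if (addr[i] != 0)
			break;

	if (i == nwords)
		return limit;	/* no bit set below the limit */

	/* __builtin_ffsl() is 1-based and returns 0 for an empty word */
	return i * BITS_PER_UL + (__builtin_ffsl(addr[i]) - 1);
}

int
main(void)
{
	/* with 64-bit longs, bit 68 is the first one set */
	unsigned long bitmap[2] = { 0x0, 0x10 };

	printf("first set bit: %u\n", first_set_bit(bitmap, 128));
	return 0;
}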
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index 295a41f9..b431c78d 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -8,12 +8,89 @@
#ifndef __COMMON_HSI__
#define __COMMON_HSI__
+/********************************/
+/* PROTOCOL COMMON FW CONSTANTS */
+/********************************/
+
+/* Temporarily here; should be added to the HSI automatically by the resource
+ * allocation tool.
+ */
+#define T_TEST_AGG_INT_TEMP 6
+#define M_TEST_AGG_INT_TEMP 8
+#define U_TEST_AGG_INT_TEMP 6
+#define X_TEST_AGG_INT_TEMP 14
+#define Y_TEST_AGG_INT_TEMP 4
+#define P_TEST_AGG_INT_TEMP 4
+
+#define X_FINAL_CLEANUP_AGG_INT 1
+
+#define EVENT_RING_PAGE_SIZE_BYTES 4096
+
+#define NUM_OF_GLOBAL_QUEUES 128
+#define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64
+
+#define ISCSI_CDU_TASK_SEG_TYPE 0
+#define FCOE_CDU_TASK_SEG_TYPE 0
+#define RDMA_CDU_TASK_SEG_TYPE 1
+
+#define FW_ASSERT_GENERAL_ATTN_IDX 32
+
+#define MAX_PINNED_CCFC 32
+
+#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
+
+/* Queue Zone sizes in bytes */
+#define TSTORM_QZONE_SIZE 8 /*tstorm_scsi_queue_zone*/
+#define MSTORM_QZONE_SIZE 16 /*mstorm_eth_queue_zone. Used only for RX
+ *producer of VFs in backward compatibility
+ *mode.
+ */
+#define USTORM_QZONE_SIZE 8 /*ustorm_eth_queue_zone*/
+#define XSTORM_QZONE_SIZE 8 /*xstorm_eth_queue_zone*/
+#define YSTORM_QZONE_SIZE 0
+#define PSTORM_QZONE_SIZE 0
+
+/*Log of mstorm default VF zone size.*/
+#define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7
+/*Maximum number of RX queues that can be allocated to VF by default*/
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16
+/*Maximum number of RX queues that can be allocated to VF with doubled VF zone
+ * size. Up to 96 VF supported in this mode
+ */
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48
+/*Maximum number of RX queues that can be allocated to VF with 4 VF zone size.
+ * Up to 48 VF supported in this mode
+ */
+#define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112
+
+
+/********************************/
+/* CORE (LIGHT L2) FW CONSTANTS */
+/********************************/
+
+#define CORE_LL2_MAX_RAMROD_PER_CON 8
+#define CORE_LL2_TX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_BD_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_CQE_PAGE_SIZE_BYTES 4096
+#define CORE_LL2_RX_NUM_NEXT_PAGE_BDS 1
+
+#define CORE_LL2_TX_MAX_BDS_PER_PACKET 12
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 7
-#define FW_REVISION_VERSION 7
+#define MAX_NUM_LL2_RX_QUEUES 32
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
+
+
+/****************************************************************************/
+/* Include firmware version number only - do not add constants here to avoid */
+/* redundant compilations */
+/****************************************************************************/
+
+
+#define FW_MAJOR_VERSION 8
+#define FW_MINOR_VERSION 10
+#define FW_REVISION_VERSION 9
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -35,9 +112,14 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+/* in both BB and K2, the VF number starts from 16, so for arrays containing all
+ * possible PFs and VFs - we need a constant for this size
+ */
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_NUM_VPORTS_K2 (208)
@@ -49,6 +131,7 @@
#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
+/* 4-Port K2. */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)
@@ -70,6 +153,19 @@
#define NUM_OF_LCIDS (320)
#define NUM_OF_LTIDS (320)
+/* Clock values */
+#define MASTER_CLK_FREQ_E4 (375e6)
+#define STORM_CLK_FREQ_E4 (1000e6)
+#define CLK25M_CLK_FREQ_E4 (25e6)
+
+/* Global PXP windows (GTT) */
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
+
+/* Tools Version */
+#define TOOLS_VERSION 10
/*****************/
/* CDU CONSTANTS */
/*****************/
@@ -77,14 +173,21 @@
#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17)
#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
+#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+
+
/*****************/
/* DQ CONSTANTS */
/*****************/
/* DEMS */
#define DQ_DEMS_LEGACY 0
+#define DQ_DEMS_TOE_MORE_TO_SEND 3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
+#define DQ_DEMS_ROCE_CQ_CONS 7
-/* XCM agg val selection */
+/* XCM agg val selection (HW) */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
#define DQ_XCM_AGG_VAL_SEL_WORD3 1
#define DQ_XCM_AGG_VAL_SEL_WORD4 2
@@ -94,7 +197,7 @@
#define DQ_XCM_AGG_VAL_SEL_REG5 6
#define DQ_XCM_AGG_VAL_SEL_REG6 7
-/* XCM agg val selection */
+/* XCM agg val selection (FW) */
#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \
DQ_XCM_AGG_VAL_SEL_WORD2
#define DQ_XCM_ETH_TX_BD_CONS_CMD \
@@ -108,8 +211,49 @@
#define DQ_XCM_CORE_SPQ_PROD_CMD \
DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5
-
-/* XCM agg counter flag selection */
+#define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5
+#define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3
+#define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
+#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+
+/* UCM agg val selection (HW) */
+#define DQ_UCM_AGG_VAL_SEL_WORD0 0
+#define DQ_UCM_AGG_VAL_SEL_WORD1 1
+#define DQ_UCM_AGG_VAL_SEL_WORD2 2
+#define DQ_UCM_AGG_VAL_SEL_WORD3 3
+#define DQ_UCM_AGG_VAL_SEL_REG0 4
+#define DQ_UCM_AGG_VAL_SEL_REG1 5
+#define DQ_UCM_AGG_VAL_SEL_REG2 6
+#define DQ_UCM_AGG_VAL_SEL_REG3 7
+
+/* UCM agg val selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2
+#define DQ_UCM_ETH_PMD_RX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD3
+#define DQ_UCM_ROCE_CQ_CONS_CMD DQ_UCM_AGG_VAL_SEL_REG0
+#define DQ_UCM_ROCE_CQ_PROD_CMD DQ_UCM_AGG_VAL_SEL_REG2
+
+/* TCM agg val selection (HW) */
+#define DQ_TCM_AGG_VAL_SEL_WORD0 0
+#define DQ_TCM_AGG_VAL_SEL_WORD1 1
+#define DQ_TCM_AGG_VAL_SEL_WORD2 2
+#define DQ_TCM_AGG_VAL_SEL_WORD3 3
+#define DQ_TCM_AGG_VAL_SEL_REG1 4
+#define DQ_TCM_AGG_VAL_SEL_REG2 5
+#define DQ_TCM_AGG_VAL_SEL_REG6 6
+#define DQ_TCM_AGG_VAL_SEL_REG9 7
+
+/* TCM agg val selection (FW) */
+#define DQ_TCM_L2B_BD_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD1
+#define DQ_TCM_ROCE_RQ_PROD_CMD DQ_TCM_AGG_VAL_SEL_WORD0
+
+/* XCM agg counter flag selection (HW) */
#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0
#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1
#define DQ_XCM_AGG_FLG_SHIFT_CF12 2
@@ -119,7 +263,7 @@
#define DQ_XCM_AGG_FLG_SHIFT_CF22 6
#define DQ_XCM_AGG_FLG_SHIFT_CF23 7
-/* XCM agg counter flag selection */
+/* XCM agg counter flag selection (FW) */
#define DQ_XCM_ETH_DQ_CF_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF18)
#define DQ_XCM_CORE_DQ_CF_CMD (1 << \
@@ -134,6 +278,81 @@
DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ETH_TPH_EN_CMD (1 << \
DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_FCOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_ISCSI_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD (1 << DQ_XCM_AGG_FLG_SHIFT_CF22)
+
+/* UCM agg counter flag selection (HW) */
+#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_UCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_UCM_AGG_FLG_SHIFT_CF3 2
+#define DQ_UCM_AGG_FLG_SHIFT_CF4 3
+#define DQ_UCM_AGG_FLG_SHIFT_CF5 4
+#define DQ_UCM_AGG_FLG_SHIFT_CF6 5
+#define DQ_UCM_AGG_FLG_SHIFT_RULE0EN 6
+#define DQ_UCM_AGG_FLG_SHIFT_RULE1EN 7
+
+/* UCM agg counter flag selection (FW) */
+#define DQ_UCM_ETH_PMD_TX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ETH_PMD_RX_ARM_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_ROCE_CQ_ARM_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD (1 << DQ_UCM_AGG_FLG_SHIFT_CF5)
+
+/* TCM agg counter flag selection (HW) */
+#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
+#define DQ_TCM_AGG_FLG_SHIFT_CF1 1
+#define DQ_TCM_AGG_FLG_SHIFT_CF2 2
+#define DQ_TCM_AGG_FLG_SHIFT_CF3 3
+#define DQ_TCM_AGG_FLG_SHIFT_CF4 4
+#define DQ_TCM_AGG_FLG_SHIFT_CF5 5
+#define DQ_TCM_AGG_FLG_SHIFT_CF6 6
+#define DQ_TCM_AGG_FLG_SHIFT_CF7 7
+
+/* TCM agg counter flag selection (FW) */
+#define DQ_TCM_FCOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_FCOE_DUMMY_TIMER_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF2)
+#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_ISCSI_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD (1 << DQ_TCM_AGG_FLG_SHIFT_CF1)
+
+/* PWM address mapping */
+#define DQ_PWM_OFFSET_DPM_BASE 0x0
+#define DQ_PWM_OFFSET_DPM_END 0x27
+#define DQ_PWM_OFFSET_XCM16_BASE 0x40
+#define DQ_PWM_OFFSET_XCM32_BASE 0x44
+#define DQ_PWM_OFFSET_UCM16_BASE 0x48
+#define DQ_PWM_OFFSET_UCM32_BASE 0x4C
+#define DQ_PWM_OFFSET_UCM16_4 0x50
+#define DQ_PWM_OFFSET_TCM16_BASE 0x58
+#define DQ_PWM_OFFSET_TCM32_BASE 0x5C
+#define DQ_PWM_OFFSET_XCM_FLAGS 0x68
+#define DQ_PWM_OFFSET_UCM_FLAGS 0x69
+#define DQ_PWM_OFFSET_TCM_FLAGS 0x6B
+
+#define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE)
+#define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_16BIT (DQ_PWM_OFFSET_UCM16_4)
+#define DQ_PWM_OFFSET_UCM_RDMA_INT_TIMEOUT (DQ_PWM_OFFSET_UCM16_BASE + 2)
+#define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS)
+#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
+#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
+
+#define DQ_REGION_SHIFT (12)
+
+/* DPM */
+#define DQ_DPM_WQE_BUFF_SIZE (320)
+
+/* Conn type ranges */
+#define DQ_CONN_TYPE_RANGE_SHIFT (4)
/*****************/
/* QM CONSTANTS */
@@ -152,11 +371,17 @@
/* number of queues in a PF queue group */
#define QM_PF_QUEUE_GROUP_SIZE 8
+/* the size of a single queue element in bytes */
+#define QM_PQ_ELEMENT_SIZE 4
+
/* base number of Tx PQs in the CM PQ representation.
* should be used when storing PQ IDs in CM PQ registers and context
*/
#define CM_TX_PQ_BASE 0x200
+/* number of global Vport/QCN rate limiters */
+#define MAX_QM_GLOBAL_RLS 256
+
/* QM registers data */
#define QM_LINE_CRD_REG_WIDTH 16
#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1))
@@ -177,10 +402,14 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB 12
+/* fsm is stopped or not valid for this sb */
#define CAU_HC_STOPPED_STATE 3
+/* fsm is working without interrupt coalescing for this sb*/
#define CAU_HC_DISABLE_STATE 4
+/* fsm is working with interrupt coalescing for this sb*/
#define CAU_HC_ENABLE_STATE 0
+
/*****************/
/* IGU CONSTANTS */
/*****************/
@@ -230,6 +459,17 @@
/* PXP CONSTANTS */
/*****************/
+/* Bars for Blocks */
+#define PXP_BAR_GRC 0
+#define PXP_BAR_TSDM 0
+#define PXP_BAR_USDM 0
+#define PXP_BAR_XSDM 0
+#define PXP_BAR_MSDM 0
+#define PXP_BAR_YSDM 0
+#define PXP_BAR_PSDM 0
+#define PXP_BAR_IGU 0
+#define PXP_BAR_DQ 1
+
/* PTT and GTT */
#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
@@ -277,6 +517,48 @@
(PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
+/* PF BAR */
+/*#define PXP_BAR0_START_GRC 0x1000 */
+/*#define PXP_BAR0_GRC_LENGTH 0xBFF000 */
+#define PXP_BAR0_START_GRC 0x0000
+#define PXP_BAR0_GRC_LENGTH 0x1C00000
+#define PXP_BAR0_END_GRC \
+ (PXP_BAR0_START_GRC + PXP_BAR0_GRC_LENGTH - 1)
+
+#define PXP_BAR0_START_IGU 0x1C00000
+#define PXP_BAR0_IGU_LENGTH 0x10000
+#define PXP_BAR0_END_IGU \
+ (PXP_BAR0_START_IGU + PXP_BAR0_IGU_LENGTH - 1)
+
+#define PXP_BAR0_START_TSDM 0x1C80000
+#define PXP_BAR0_SDM_LENGTH 0x40000
+#define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000
+#define PXP_BAR0_END_TSDM \
+ (PXP_BAR0_START_TSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_MSDM 0x1D00000
+#define PXP_BAR0_END_MSDM \
+ (PXP_BAR0_START_MSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_USDM 0x1D80000
+#define PXP_BAR0_END_USDM \
+ (PXP_BAR0_START_USDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_XSDM 0x1E00000
+#define PXP_BAR0_END_XSDM \
+ (PXP_BAR0_START_XSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_YSDM 0x1E80000
+#define PXP_BAR0_END_YSDM \
+ (PXP_BAR0_START_YSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_START_PSDM 0x1F00000
+#define PXP_BAR0_END_PSDM \
+ (PXP_BAR0_START_PSDM + PXP_BAR0_SDM_LENGTH - 1)
+
+#define PXP_BAR0_FIRST_INVALID_ADDRESS \
+ (PXP_BAR0_END_PSDM + 1)
+
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
@@ -285,6 +567,52 @@
#define PXP_NUM_ILT_RECORDS_K2 11000
#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
+
+/* Host Interface */
+#define PXP_QUEUES_ZONE_MAX_NUM 320
+
+
+
+
+/*****************/
+/* PRM CONSTANTS */
+/*****************/
+#define PRM_DMA_PAD_BYTES_NUM 2
+/*****************/
+/* SDMs CONSTANTS */
+/*****************/
+
+
+#define SDM_OP_GEN_TRIG_NONE 0
+#define SDM_OP_GEN_TRIG_WAKE_THREAD 1
+#define SDM_OP_GEN_TRIG_AGG_INT 2
+#define SDM_OP_GEN_TRIG_LOADER 4
+#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
+#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
+
+/***********************************************************/
+/* Completion types */
+/***********************************************************/
+
+#define SDM_COMP_TYPE_NONE 0
+#define SDM_COMP_TYPE_WAKE_THREAD 1
+#define SDM_COMP_TYPE_AGG_INT 2
+/* Send direct message to local CM and/or remote CMs. Destinations are defined
+ * by vector in CompParams.
+ */
+#define SDM_COMP_TYPE_CM 3
+#define SDM_COMP_TYPE_LOADER 4
+/* Send direct message to PXP (like "internal write" command) to write to remote
+ * Storm RAM via remote SDM
+ */
+#define SDM_COMP_TYPE_PXP 5
+/* Indicate error per thread */
+#define SDM_COMP_TYPE_INDICATE_ERROR 6
+#define SDM_COMP_TYPE_RELEASE_THREAD 7
+/* Write to local RAM as a completion */
+#define SDM_COMP_TYPE_RAM 8
+
+
/******************/
/* PBF CONSTANTS */
/******************/
@@ -299,37 +627,82 @@
/* PRS CONSTANTS */
/*****************/
+#define PRS_GFT_CAM_LINES_NO_MATCH 31
/* Async data KCQ CQE */
struct async_data {
+ /* Context ID of the connection */
__le32 cid;
+	/* Task Id of the task (for error that happened on a task) */
__le16 itid;
+	/* error code - relevant only if the opcode indicates it's an error */
u8 error_code;
+ /* internal fw debug parameter */
u8 fw_debug_param;
};
+/*
+ * Interrupt coalescing TimeSet
+ */
+struct coalescing_timeset {
+ u8 value;
+/* Interrupt coalescing TimeSet (timeout_ticks = TimeSet shl (TimerRes+1)) */
+#define COALESCING_TIMESET_TIMESET_MASK 0x7F
+#define COALESCING_TIMESET_TIMESET_SHIFT 0
+/* Only if this flag is set, timeset will take effect */
+#define COALESCING_TIMESET_VALID_MASK 0x1
+#define COALESCING_TIMESET_VALID_SHIFT 7
+};
+
+struct common_queue_zone {
+ __le16 ring_drv_data_consumer;
+ __le16 reserved;
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_prod_data {
+ __le16 bd_prod /* BD producer. */;
+ __le16 cqe_prod /* CQE producer. */;
+};
+
struct regpair {
__le32 lo /* low word for reg-pair */;
__le32 hi /* high word for reg-pair */;
};
+/*
+ * Event Ring VF-PF Channel data
+ */
struct vf_pf_channel_eqe_data {
struct regpair msg_addr /* VF-PF message address */;
};
struct iscsi_eqe_data {
__le32 cid /* Context ID of the connection */;
- __le16 conn_id
/* Task Id of the task (for error that happened on a a task) */;
+ __le16 conn_id;
+/* error code - relevant only if the opcode indicates it's an error */
u8 error_code;
- u8 reserved0;
+ u8 error_pdu_opcode_reserved;
+/* The opcode of the processed PDU on which the error happened - updated for specific
+ * error codes, by default=0xFF
+ */
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_SHIFT 0
+/* Indication for driver is the error_pdu_opcode field has valid value */
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_MASK 0x1
+#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_VALID_SHIFT 6
+#define ISCSI_EQE_DATA_RESERVED0_MASK 0x1
+#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
/*
* Event Ring malicious VF data
*/
struct malicious_vf_eqe_data {
- u8 vf_id /* Malicious VF ID */; /* WARNING:CAMELCASE */
- u8 err_id /* Malicious VF error */;
+ u8 vfId /* Malicious VF ID */;
+ u8 errId /* Malicious VF error */;
__le16 reserved[3];
};
@@ -337,29 +710,34 @@ struct malicious_vf_eqe_data {
* Event Ring initial cleanup data
*/
struct initial_cleanup_eqe_data {
- u8 vf_id /* VF ID */; /* WARNING:CAMELCASE */
+ u8 vfId /* VF ID */;
u8 reserved[7];
};
-
+/*
+ * Event Data Union
+ */
union event_ring_data {
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
- struct regpair roce_handle /* WARNING:CAMELCASE */
/* Dedicated field for RoCE affiliated asynchronous error */;
+ struct regpair roceHandle;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup
/* VF Initial Cleanup data */;
+/* Host handle for the Async Completions */
+ struct regpair iwarp_handle;
};
/* Event Ring Entry */
struct event_ring_entry {
- u8 protocol_id;
- u8 opcode;
- __le16 reserved0;
- __le16 echo;
- u8 fw_return_code;
- u8 flags;
+ u8 protocol_id /* Event Protocol ID */;
+ u8 opcode /* Event Opcode */;
+ __le16 reserved0 /* Reserved */;
+ __le16 echo /* Echo value from ramrod data on the host */;
+ u8 fw_return_code /* FW return code for SP ramrods */;
+ u8 flags;
+/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
@@ -369,9 +747,9 @@ struct event_ring_entry {
/* Multi function mode */
enum mf_mode {
- SF,
- MF_OVLAN,
- MF_NPAR,
+ ERROR_MODE /* Unsupported mode */,
+ MF_OVLAN /* Multi function based on outer VLAN */,
+ MF_NPAR /* Multi function based on MAC address (NIC partitioning) */,
MAX_MF_MODE
};
@@ -390,35 +768,59 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+/*
+ * Ustorm Queue Zone
+ */
+struct ustorm_eth_queue_zone {
+/* Rx interrupt coalescing TimeSet */
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[3];
+};
+
+
+struct ustorm_queue_zone {
+ struct ustorm_eth_queue_zone eth;
+ struct common_queue_zone common;
+};
+
/* status block structure */
struct cau_pi_entry {
- u32 prod;
+ __le32 prod;
+/* A per protocol indexPROD value. */
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
+/* This value determines the TimeSet that the PI is associated with */
#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F
#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16
+/* Select the FSM within the SB */
#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1
#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23
+/* Select the FSM within the SB */
#define CAU_PI_ENTRY_RESERVED_MASK 0xFF
#define CAU_PI_ENTRY_RESERVED_SHIFT 24
};
/* status block structure */
struct cau_sb_entry {
- u32 data;
+ __le32 data;
+/* The SB PROD index which is sent to the IGU. */
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
-#define CAU_SB_ENTRY_STATE0_MASK 0xF
+#define CAU_SB_ENTRY_STATE0_MASK 0xF /* RX state */
#define CAU_SB_ENTRY_STATE0_SHIFT 24
-#define CAU_SB_ENTRY_STATE1_MASK 0xF
+#define CAU_SB_ENTRY_STATE1_MASK 0xF /* TX state */
#define CAU_SB_ENTRY_STATE1_SHIFT 28
- u32 params;
+ __le32 params;
+/* Indicates the RX TimeSet that this SB is associated with. */
#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
+/* Indicates the TX TimeSet that this SB is associated with. */
#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7
+/* This value will determine the RX FSM timer resolution in ticks */
#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3
#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14
+/* This value will determine the TX FSM timer resolution in ticks */
#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3
#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16
#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF
@@ -427,6 +829,9 @@ struct cau_sb_entry {
#define CAU_SB_ENTRY_VF_VALID_SHIFT 26
#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF
#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27
+/* If set then indicates that the TPH STAG is equal to the SB number. Otherwise
+ * the STAG will be equal to all ones.
+ */
#define CAU_SB_ENTRY_TPH_MASK 0x1
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
@@ -434,49 +839,173 @@ struct cau_sb_entry {
/* core doorbell data */
struct core_db_data {
u8 params;
+/* destination of doorbell (use enum db_dest) */
#define CORE_DB_DATA_DEST_MASK 0x3
#define CORE_DB_DATA_DEST_SHIFT 0
+/* aggregative command to CM (use enum db_agg_cmd_sel) */
#define CORE_DB_DATA_AGG_CMD_MASK 0x3
#define CORE_DB_DATA_AGG_CMD_SHIFT 2
-#define CORE_DB_DATA_BYPASS_EN_MASK 0x1
+#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
#define CORE_DB_DATA_BYPASS_EN_SHIFT 4
#define CORE_DB_DATA_RESERVED_MASK 0x1
#define CORE_DB_DATA_RESERVED_SHIFT 5
+/* aggregative value selection */
#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+/* bit for every DQ counter flags in CM context that DQ can increment */
u8 agg_flags;
__le16 spq_prod;
};
/* Enum of doorbell aggregative command selection */
enum db_agg_cmd_sel {
- DB_AGG_CMD_NOP,
- DB_AGG_CMD_SET,
- DB_AGG_CMD_ADD,
- DB_AGG_CMD_MAX,
+ DB_AGG_CMD_NOP /* No operation */,
+ DB_AGG_CMD_SET /* Set the value */,
+ DB_AGG_CMD_ADD /* Add the value */,
+ DB_AGG_CMD_MAX /* Set max of current and new value */,
MAX_DB_AGG_CMD_SEL
};
/* Enum of doorbell destination */
enum db_dest {
- DB_DEST_XCM,
- DB_DEST_UCM,
- DB_DEST_TCM,
+ DB_DEST_XCM /* TX doorbell to XCM */,
+ DB_DEST_UCM /* RX doorbell to UCM */,
+ DB_DEST_TCM /* RX doorbell to TCM */,
DB_NUM_DESTINATIONS,
MAX_DB_DEST
};
+
+/*
+ * Enum of doorbell DPM types
+ */
+enum db_dpm_type {
+ DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
+ DPM_ROCE /* RoCE DPM- to NIG */,
+/* L2 DPM inline- to PBF, with packet data on doorbell */
+ DPM_L2_INLINE,
+ DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
+ MAX_DB_DPM_TYPE
+};
+
+/*
+ * Structure for doorbell data, in L2 DPM mode, for the first doorbell in a DPM
+ * burst
+ */
+struct db_l2_dpm_data {
+ __le16 icid /* internal CID */;
+ __le16 bd_prod /* bd producer value to update */;
+ __le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
+ */
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+/* size of the packet to be transmitted in bytes */
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+/* In DPM_L2_BD mode: the number of SGE-s */
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+};
+
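The first doorbell of an L2 DPM burst packs its parameters into the single 32-bit params word above. A hedged sketch of building it, again assuming SET_FIELD() and the OSAL endianness helpers; the function is illustrative, not an existing API:

static void db_l2_dpm_first_db(struct db_l2_dpm_data *p_db, u16 icid,
			       u16 bd_prod, u8 num_bds, u16 pkt_bytes,
			       u32 burst_qwords)
{
	u32 params = 0;

	SET_FIELD(params, DB_L2_DPM_DATA_SIZE, burst_qwords);
	SET_FIELD(params, DB_L2_DPM_DATA_DPM_TYPE, DPM_L2_BD);
	SET_FIELD(params, DB_L2_DPM_DATA_NUM_BDS, num_bds);
	SET_FIELD(params, DB_L2_DPM_DATA_PKT_SIZE, pkt_bytes);

	p_db->icid = OSAL_CPU_TO_LE16(icid);
	p_db->bd_prod = OSAL_CPU_TO_LE16(bd_prod);
	p_db->params = OSAL_CPU_TO_LE32(params);
}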
+/*
+ * Structure for SGE in a DPM doorbell of type DPM_L2_BD
+ */
+struct db_l2_dpm_sge {
+ struct regpair addr /* Single continuous buffer */;
+ __le16 nbytes /* Number of bytes in this BD. */;
+ __le16 bitfields;
+/* The TPH STAG index value */
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF
+#define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0
+#define DB_L2_DPM_SGE_RESERVED0_MASK 0x3
+#define DB_L2_DPM_SGE_RESERVED0_SHIFT 9
+/* Indicate if ST hint is requested or not */
+#define DB_L2_DPM_SGE_ST_VALID_MASK 0x1
+#define DB_L2_DPM_SGE_ST_VALID_SHIFT 11
+#define DB_L2_DPM_SGE_RESERVED1_MASK 0xF
+#define DB_L2_DPM_SGE_RESERVED1_SHIFT 12
+ __le32 reserved2;
+};
+
/* Structure for doorbell address, in legacy mode */
struct db_legacy_addr {
__le32 addr;
#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3
#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0
+/* doorbell extraction mode specifier- 0 if not used */
#define DB_LEGACY_ADDR_DEMS_MASK 0x7
#define DB_LEGACY_ADDR_DEMS_SHIFT 2
-#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF
+#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF /* internal CID */
#define DB_LEGACY_ADDR_ICID_SHIFT 5
};
+/*
+ * Structure for doorbell address, in PWM mode
+ */
+struct db_pwm_addr {
+ __le32 addr;
+#define DB_PWM_ADDR_RESERVED0_MASK 0x7
+#define DB_PWM_ADDR_RESERVED0_SHIFT 0
+/* Offset in PWM address space */
+#define DB_PWM_ADDR_OFFSET_MASK 0x7F
+#define DB_PWM_ADDR_OFFSET_SHIFT 3
+#define DB_PWM_ADDR_WID_MASK 0x3 /* Window ID */
+#define DB_PWM_ADDR_WID_SHIFT 10
+#define DB_PWM_ADDR_DPI_MASK 0xFFFF /* Doorbell page ID */
+#define DB_PWM_ADDR_DPI_SHIFT 12
+#define DB_PWM_ADDR_RESERVED1_MASK 0xF
+#define DB_PWM_ADDR_RESERVED1_SHIFT 28
+};
+
+/*
+ * Parameters to RoCE firmware, passed in EDPM doorbell
+ */
+struct db_roce_dpm_params {
+ __le32 params;
+/* Size in QWORD-s of the DPM burst */
+#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_ROCE) (use enum db_dpm_type) */
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
+/* opcode for ROCE operation */
+#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+/* the size of the WQE payload in bytes */
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+/* RoCE completion flag */
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+};
+
+/*
+ * Structure for doorbell data, in ROCE DPM mode, for the first doorbell in a
+ * DPM burst
+ */
+struct db_roce_dpm_data {
+ __le16 icid /* internal CID */;
+ __le16 prod_val /* aggregated value to update */;
+/* parameters passed to RoCE firmware */
+ struct db_roce_dpm_params params;
+};
+
/* Igu interrupt command */
enum igu_int_cmd {
IGU_INT_ENABLE = 0,
@@ -488,22 +1017,25 @@ enum igu_int_cmd {
/* IGU producer or consumer update command */
struct igu_prod_cons_update {
- u32 sb_id_and_flags;
+ __le32 sb_id_and_flags;
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24
+/* interrupt enable/disable/nop (use enum igu_int_cmd) */
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3
#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25
+/* (use enum igu_seg_access) */
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1
#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1
#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28
#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
+/* must always be cleared (use enum command_type_bit) */
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
- u32 reserved1;
+ __le32 reserved1;
};
/* Igu segments access for default status block only */
@@ -513,46 +1045,111 @@ enum igu_seg_access {
MAX_IGU_SEG_ACCESS
};
+
+/*
+ * Enumeration for L3 type field of parsing_and_err_flags_union. L3Type:
+ * 0 - unknown (not IP), 1 - IPv4, 2 - IPv6 (this field can be filled according
+ * to the last-ethertype)
+ */
+enum l3_type {
+ e_l3Type_unknown,
+ e_l3Type_ipv4,
+ e_l3Type_ipv6,
+ MAX_L3_TYPE
+};
+
+
+/*
+ * Enumeration for l4Protocol field of parsing_and_err_flags_union. L4-protocol
+ * 0 - none, 1 - TCP, 2 - UDP. If the packet is an IPv4 fragment and it is not
+ * the first fragment, the protocol type should be set to none.
+ */
+enum l4_protocol {
+ e_l4Protocol_none,
+ e_l4Protocol_tcp,
+ e_l4Protocol_udp,
+ MAX_L4_PROTOCOL
+};
+
+
+/*
+ * Parsing and error flags field.
+ */
struct parsing_and_err_flags {
__le16 flags;
+/* L3Type: 0 - unknown (not IP), 1 - IPv4, 2 - IPv6 (this field can be filled
+ * according to the last-ethertype) (use enum l3_type)
+ */
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0
+/* L4-protocol: 0 - none, 1 - TCP, 2 - UDP. If the packet is an IPv4 fragment
+ * and it is not the first fragment, the protocol type should be set to none.
+ * (use enum l4_protocol)
+ */
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3
#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2
+/* Set if the packet is IPv4 fragment. */
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4
+/* Set if VLAN tag exists. Invalid if tunnel type is IP GRE or IP GENEVE. */
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5
+/* Set if L4 checksum was calculated. */
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6
+/* Set for PTP packet. */
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7
+/* Set if PTP timestamp recorded. */
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8
+/* Set if either version-mismatch or hdr-len-error or ipv4-cksm is set or ipv6
+ * ver mismatch
+ */
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9
+/* Set if L4 checksum validation failed. Valid only if L4 checksum was
+ * calculated.
+ */
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10
+/* Set if GRE/VXLAN/GENEVE tunnel detected. */
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11
+/* Set if VLAN tag exists in tunnel header. */
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12
+/* Set if either tunnel-ipv4-version-mismatch or tunnel-ipv4-hdr-len-error or
+ * tunnel-ipv4-cksm is set or tunneling ipv6 ver mismatch
+ */
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13
+/* Set if GRE or VXLAN/GENEVE UDP checksum was calculated. */
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14
+/* Set if tunnel L4 checksum validation failed. Valid only if tunnel L4 checksum
+ * was calculated.
+ */
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1
#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15
};
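On the fast path these flags are what the PMD translates into per-packet offload status. A minimal sketch, assuming GET_FIELD() as used elsewhere in this patch, the OSAL_LE16_TO_CPU() conversion, and the standard DPDK 16.11 mbuf flags; the helper itself is illustrative:

static uint64_t parse_flags_to_ol_flags(__le16 le_flags)
{
	u16 flags = OSAL_LE16_TO_CPU(le_flags);
	uint64_t ol_flags = 0;

	/* L4 checksum was computed by HW and found bad */
	if (GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
	    GET_FIELD(flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR))
		ol_flags |= PKT_RX_L4_CKSUM_BAD;

	/* IP header or IPv4 checksum error */
	if (GET_FIELD(flags, PARSING_AND_ERR_FLAGS_IPHDRERROR))
		ol_flags |= PKT_RX_IP_CKSUM_BAD;

	return ol_flags;
}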
+
+/*
+ * Pb context
+ */
+struct pb_context {
+ __le32 crc[4];
+};
+
/* Concrete Function ID. */
struct pxp_concrete_fid {
__le16 fid;
-#define PXP_CONCRETE_FID_PFID_MASK 0xF
+#define PXP_CONCRETE_FID_PFID_MASK 0xF /* Parent PFID */
#define PXP_CONCRETE_FID_PFID_SHIFT 0
-#define PXP_CONCRETE_FID_PORT_MASK 0x3
+#define PXP_CONCRETE_FID_PORT_MASK 0x3 /* port number */
#define PXP_CONCRETE_FID_PORT_SHIFT 4
-#define PXP_CONCRETE_FID_PATH_MASK 0x1
+#define PXP_CONCRETE_FID_PATH_MASK 0x1 /* path number */
#define PXP_CONCRETE_FID_PATH_SHIFT 6
#define PXP_CONCRETE_FID_VFVALID_MASK 0x1
#define PXP_CONCRETE_FID_VFVALID_SHIFT 7
@@ -611,6 +1208,103 @@ struct pxp_ptt_entry {
struct pxp_pretend_cmd pretend;
};
+
+/*
+ * VF Zone A Permission Register.
+ */
+struct pxp_vf_zone_a_permission {
+ __le32 control;
+#define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF
+#define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0
+#define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1
+#define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF
+#define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16
+};
+
+
+/*
+ * Rdif context
+ */
+struct rdif_task_context {
+ __le32 initialRefTag;
+ __le16 appTagValue;
+ __le16 appTagMask;
+ u8 flags0;
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+/* 0 = IP checksum, 1 = CRC */
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+/* 1/2/3 - Protection Type */
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+/* 0=0x0000, 1=0xffff */
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+/* Keep reference tag constant */
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
+ u8 partialDifData[7];
+ __le16 partialCrcValue;
+ __le16 partialChecksumValue;
+ __le32 offsetInIO;
+ __le16 flags1;
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+/* 0=None, 1=DIF, 2=DIX */
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+/* DIF tag right at the beginning of DIF interval */
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+/* 0=None, 1=DIF */
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+/* Forward application tag with mask */
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
+/* Forward reference tag with mask */
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+ __le16 state;
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+/* mask for reference tag handling */
+#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+ __le32 reserved2;
+};
+
/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
@@ -640,20 +1334,6 @@ struct status_block {
#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
};
-/* @DPDK */
-#define X_FINAL_CLEANUP_AGG_INT 1
-#define SDM_COMP_TYPE_AGG_INT 2
-#define MAX_NUM_LL2_RX_QUEUES 32
-#define QM_PQ_ELEMENT_SIZE 4
-#define PXP_VF_BAR0_START_IGU 0
-#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3
-
-#define TSTORM_QZONE_SIZE 8
-#define MSTORM_QZONE_SIZE 16
-#define USTORM_QZONE_SIZE 8
-#define XSTORM_QZONE_SIZE 0
-#define YSTORM_QZONE_SIZE 8
-#define PSTORM_QZONE_SIZE 0
/* VF BAR */
#define PXP_VF_BAR0 0
@@ -708,7 +1388,165 @@ struct status_block {
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+/*
+ * Tdif context
+ */
+struct tdif_task_context {
+ __le32 initialRefTag;
+ __le16 appTagValue;
+ __le16 appTagMask;
+ __le16 partialCrcValueB;
+ __le16 partialChecksumValueB;
+ __le16 stateB;
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+ u8 reserved1;
+ u8 flags0;
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+/* 0 = IP checksum, 1 = CRC */
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+/* 1/2/3 - Protection Type */
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+/* 0=0x0000, 1=0xffff */
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+ __le32 flags1;
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+/* 0=None, 1=DIF, 2=DIX */
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+/* DIF tag right at the beginning of DIF interval */
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
+/* reserved */
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+/* 0=None, 1=DIF */
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
+/* mask for reference tag handling */
+#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
+/* Forward application tag with mask */
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
+/* Forward reference tag with mask */
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
+/* Keep reference tag constant */
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offsetInIOB;
+ __le16 partialCrcValueA;
+ __le16 partialChecksumValueA;
+ __le32 offsetInIOA;
+ u8 partialDifDataA[8];
+ u8 partialDifDataB[8];
+};
+
+
+/*
+ * Timers context
+ */
+struct timers_context {
+ __le32 logical_client_0;
+/* Expiration time of logical client 0 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+/* Valid bit of logical client 0 */
+#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
+/* Active bit of logical client 0 */
+#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+ __le32 logical_client_1;
+/* Expiration time of logical client 1 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+/* Valid bit of logical client 1 */
+#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
+/* Active bit of logical client 1 */
+#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+ __le32 logical_client_2;
+/* Expiration time of logical client 2 */
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+/* Valid bit of logical client 2 */
+#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
+#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
+/* Active bit of logical client 2 */
+#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
+#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+ __le32 host_expiration_fields;
+/* Expiration time on host (closest one) */
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+/* Valid bit of host expiration */
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
+};
+
+
+/*
+ * Enum for next_protocol field of tunnel_parsing_flags
+ */
+enum tunnel_next_protocol {
+ e_unknown = 0,
+ e_l2 = 1,
+ e_ipv4 = 2,
+ e_ipv6 = 3,
+ MAX_TUNNEL_NEXT_PROTOCOL
+};
#endif /* __COMMON_HSI__ */
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index d682a785..907b35b9 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -9,18 +9,33 @@
#ifndef __ECORE_H
#define __ECORE_H
+/* @DPDK */
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#define CONFIG_ECORE_BINARY_FW
+#undef CONFIG_ECORE_ZIPPED_FW
+
+#ifdef CONFIG_ECORE_ZIPPED_FW
+#include <zlib.h>
+#endif
+
#include "ecore_hsi_common.h"
-#include "ecore_hsi_tools.h"
+#include "ecore_hsi_debug_tools.h"
+#include "ecore_hsi_init_func.h"
+#include "ecore_hsi_init_tool.h"
#include "ecore_proto_if.h"
#include "mcp_public.h"
#define MAX_HWFNS_PER_DEVICE (4)
-#define NAME_SIZE 64 /* @DPDK */
+#define NAME_SIZE 128 /* @DPDK */
#define VER_SIZE 16
-/* @DPDK ARRAY_DECL */
#define ECORE_WFQ_UNIT 100
-#include "../qede_logs.h" /* @DPDK */
+#include "../qede_logs.h" /* @DPDK */
+#define ISCSI_BDQ_ID(_port_id) (_port_id)
+#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
/* Constants */
#define ECORE_WID_SIZE (1024)
@@ -75,12 +90,11 @@ do { \
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
- (cid * ECORE_PF_DEMS_SIZE);
+ (cid * ECORE_PF_DEMS_SIZE);
return db_addr;
}
-/* @DPDK: This is a backport from latest ecore for TSS fix */
static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -93,6 +107,7 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \
~((1 << (p_hwfn->p_dev->cache_shift)) - 1))
+#ifndef LINUX_REMOVE
#ifndef U64_HI
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#endif
@@ -100,13 +115,14 @@ static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
#ifndef U64_LO
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#endif
+#endif
#ifndef __EXTRACT__LINUX__
enum DP_LEVEL {
- ECORE_LEVEL_VERBOSE = 0x0,
- ECORE_LEVEL_INFO = 0x1,
- ECORE_LEVEL_NOTICE = 0x2,
- ECORE_LEVEL_ERR = 0x3,
+ ECORE_LEVEL_VERBOSE = 0x0,
+ ECORE_LEVEL_INFO = 0x1,
+ ECORE_LEVEL_NOTICE = 0x2,
+ ECORE_LEVEL_ERR = 0x3,
};
#define ECORE_LOG_LEVEL_SHIFT (30)
@@ -116,31 +132,34 @@ enum DP_LEVEL {
enum DP_MODULE {
#ifndef LINUX_REMOVE
- ECORE_MSG_DRV = 0x0001,
- ECORE_MSG_PROBE = 0x0002,
- ECORE_MSG_LINK = 0x0004,
- ECORE_MSG_TIMER = 0x0008,
- ECORE_MSG_IFDOWN = 0x0010,
- ECORE_MSG_IFUP = 0x0020,
- ECORE_MSG_RX_ERR = 0x0040,
- ECORE_MSG_TX_ERR = 0x0080,
- ECORE_MSG_TX_QUEUED = 0x0100,
- ECORE_MSG_INTR = 0x0200,
- ECORE_MSG_TX_DONE = 0x0400,
- ECORE_MSG_RX_STATUS = 0x0800,
- ECORE_MSG_PKTDATA = 0x1000,
- ECORE_MSG_HW = 0x2000,
- ECORE_MSG_WOL = 0x4000,
+ ECORE_MSG_DRV = 0x0001,
+ ECORE_MSG_PROBE = 0x0002,
+ ECORE_MSG_LINK = 0x0004,
+ ECORE_MSG_TIMER = 0x0008,
+ ECORE_MSG_IFDOWN = 0x0010,
+ ECORE_MSG_IFUP = 0x0020,
+ ECORE_MSG_RX_ERR = 0x0040,
+ ECORE_MSG_TX_ERR = 0x0080,
+ ECORE_MSG_TX_QUEUED = 0x0100,
+ ECORE_MSG_INTR = 0x0200,
+ ECORE_MSG_TX_DONE = 0x0400,
+ ECORE_MSG_RX_STATUS = 0x0800,
+ ECORE_MSG_PKTDATA = 0x1000,
+ ECORE_MSG_HW = 0x2000,
+ ECORE_MSG_WOL = 0x4000,
#endif
- ECORE_MSG_SPQ = 0x10000,
- ECORE_MSG_STATS = 0x20000,
- ECORE_MSG_DCB = 0x40000,
- ECORE_MSG_IOV = 0x80000,
- ECORE_MSG_SP = 0x100000,
- ECORE_MSG_STORAGE = 0x200000,
- ECORE_MSG_CXT = 0x800000,
- ECORE_MSG_ILT = 0x2000000,
- ECORE_MSG_DEBUG = 0x8000000,
+ ECORE_MSG_SPQ = 0x10000,
+ ECORE_MSG_STATS = 0x20000,
+ ECORE_MSG_DCB = 0x40000,
+ ECORE_MSG_IOV = 0x80000,
+ ECORE_MSG_SP = 0x100000,
+ ECORE_MSG_STORAGE = 0x200000,
+ ECORE_MSG_OOO = 0x200000,
+ ECORE_MSG_CXT = 0x800000,
+ ECORE_MSG_LL2 = 0x1000000,
+ ECORE_MSG_ILT = 0x2000000,
+ ECORE_MSG_RDMA = 0x4000000,
+ ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
#endif
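These module bits, together with DP_LEVEL, gate the DP_VERBOSE()/DP_NOTICE() output used throughout the driver (several DP_VERBOSE calls appear later in this patch). A minimal usage sketch, with the chosen flag combination purely illustrative:

static void enable_iov_traces(struct ecore_dev *p_dev)
{
	/* Verbose tracing for slow-path queue and SR-IOV activity only */
	p_dev->dp_level = ECORE_LEVEL_VERBOSE;
	p_dev->dp_module = ECORE_MSG_SPQ | ECORE_MSG_IOV;
}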
@@ -159,13 +178,14 @@ struct ecore_sb_attn_info;
struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
+struct ecore_ll2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
struct ecore_rt_data {
- u32 *init_val;
- bool *b_valid;
+ u32 *init_val;
+ bool *b_valid;
};
enum ecore_tunn_mode {
@@ -181,68 +201,50 @@ enum ecore_tunn_clss {
ECORE_TUNN_CLSS_MAC_VNI,
ECORE_TUNN_CLSS_INNER_MAC_VLAN,
ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_ECORE_TUNN_CLSS,
};
struct ecore_tunn_start_params {
unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
};
struct ecore_tunn_update_params {
unsigned long tunn_mode_update_mask;
unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
-};
-
-struct ecore_hw_sriov_info {
- /* standard SRIOV capability fields, mostly for debugging */
- int pos; /* capability position */
- int nres; /* number of resources */
- u32 cap; /* SR-IOV Capabilities */
- u16 ctrl; /* SR-IOV Control */
- u16 total_vfs; /* total VFs associated with the PF */
- u16 num_vfs; /* number of vfs that have been started */
- u64 active_vfs[3]; /* bitfield of active vfs */
-#define ECORE_IS_VF_ACTIVE(_p_dev, _rel_vf_id) \
- (!!(_p_dev->sriov_info.active_vfs[_rel_vf_id / 64] & \
- (1ULL << (_rel_vf_id % 64))))
- u16 initial_vfs; /* initial VFs associated with the PF */
- u16 nr_virtfn; /* number of VFs available */
- u16 offset; /* first VF Routing ID offset */
- u16 stride; /* following VF stride */
- u16 vf_device_id; /* VF device id */
- u32 pgsz; /* page size for BAR alignment */
- u8 link; /* Function Dependency Link */
-
- bool b_hw_channel; /* Whether PF uses the HW-channel */
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
};
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
- * 2. The Ethernet personality may support also the RoCE protocol
+ * 2. The Ethernet personality may also support the RoCE/iWARP protocol
*/
enum ecore_pci_personality {
ECORE_PCI_ETH,
- ECORE_PCI_DEFAULT /* default in shmem */
+ ECORE_PCI_FCOE,
+ ECORE_PCI_ISCSI,
+ ECORE_PCI_ETH_ROCE,
+ ECORE_PCI_IWARP,
+ ECORE_PCI_DEFAULT /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
@@ -254,11 +256,10 @@ struct ecore_qm_iids {
#define MAX_PF_PER_PORT 8
-/*@@@TBD MK RESC: need to remove and use MCP interface instead */
/* HW / FW resources, output of features supported below, most information
* is received from MFW.
*/
-enum ECORE_RESOURCES {
+enum ecore_resources {
ECORE_SB,
ECORE_L2_QUEUE,
ECORE_VPORT,
@@ -267,24 +268,30 @@ enum ECORE_RESOURCES {
ECORE_RL,
ECORE_MAC,
ECORE_VLAN,
+ ECORE_RDMA_CNQ_RAM,
ECORE_ILT,
+ ECORE_LL2_QUEUE,
ECORE_CMDQS_CQS,
- ECORE_MAX_RESC,
+ ECORE_RDMA_STATS_QUEUE,
+ ECORE_MAX_RESC, /* must be last */
};
/* Features that require resources, given as input to the resource management
* algorithm, the output are the resources above
*/
-enum ECORE_FEATURE {
+enum ecore_feature {
ECORE_PF_L2_QUE,
ECORE_PF_TC,
ECORE_VF,
ECORE_EXTRA_VF_QUE,
ECORE_VMQ,
+ ECORE_RDMA_CNQ,
+ ECORE_ISCSI_CQ,
+ ECORE_FCOE_CQ,
ECORE_MAX_FEATURES,
};
-enum ECORE_PORT_MODE {
+enum ecore_port_mode {
ECORE_PORT_MODE_DE_2X40G,
ECORE_PORT_MODE_DE_2X50G,
ECORE_PORT_MODE_DE_1X100G,
@@ -293,11 +300,16 @@ enum ECORE_PORT_MODE {
ECORE_PORT_MODE_DE_4X20G,
ECORE_PORT_MODE_DE_1X40G,
ECORE_PORT_MODE_DE_2X25G,
- ECORE_PORT_MODE_DE_1X25G
+ ECORE_PORT_MODE_DE_1X25G,
+ ECORE_PORT_MODE_DE_4X25G,
};
enum ecore_dev_cap {
ECORE_DEV_CAP_ETH,
+ ECORE_DEV_CAP_FCOE,
+ ECORE_DEV_CAP_ISCSI,
+ ECORE_DEV_CAP_ROCE,
+ ECORE_DEV_CAP_IWARP
};
#ifndef __EXTRACT__LINUX__
@@ -320,16 +332,26 @@ struct ecore_hw_info {
u32 resc_num[ECORE_MAX_RESC];
u32 feat_num[ECORE_MAX_FEATURES];
-#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
-#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
-#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ #define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+ #define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+ #define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
RESC_NUM(_p_hwfn, resc))
-#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+ #define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ /* Amount of traffic classes HW supports */
+ u8 num_hw_tc;
+
+/* Amount of TCs which should be active according to DCBx or upper layer driver
+ * configuration
+ */
- u8 num_tc;
+ u8 num_active_tc;
+
+ /* Traffic class used for TCP out-of-order traffic */
u8 ooo_tc;
+
+ /* The traffic class used by the PF for its offloaded protocol */
u8 offload_tc;
- u8 non_offload_tc;
u32 concrete_fid;
u16 opaque_fid;
@@ -337,25 +359,29 @@ struct ecore_hw_info {
u32 part_num[4];
unsigned char hw_mac_addr[ETH_ALEN];
+ u64 node_wwn; /* For FCoE only */
+ u64 port_wwn; /* For FCoE only */
+
+ u16 num_iscsi_conns;
+ u16 num_fcoe_conns;
struct ecore_igu_info *p_igu_info;
/* Sriov */
- u32 first_vf_in_pf;
u8 max_chains_per_vf;
u32 port_mode;
- u32 hw_mode;
+ u32 hw_mode;
unsigned long device_capabilities;
};
struct ecore_hw_cid_data {
- u32 cid;
- bool b_cid_allocated;
- u8 vfid; /* 1-based; 0 signals this is for a PF */
+ u32 cid;
+ bool b_cid_allocated;
+ u8 vfid; /* 1-based; 0 signals this is for a PF */
/* Additional identifiers */
- u16 opaque_fid;
- u8 vport_id;
+ u16 opaque_fid;
+ u8 vport_id;
};
/* maximum size of read/write commands (HW limit) */
@@ -363,7 +389,7 @@ struct ecore_hw_cid_data {
struct ecore_dmae_info {
/* Mutex for synchronizing access to functions */
- osal_mutex_t mutex;
+ osal_mutex_t mutex;
u8 channel;
@@ -387,33 +413,34 @@ struct ecore_dmae_info {
};
struct ecore_wfq_data {
- u32 default_min_speed; /* When wfq feature is not configured */
- u32 min_speed; /* when feature is configured for any 1 vport */
+ u32 default_min_speed; /* When wfq feature is not configured */
+ u32 min_speed; /* when feature is configured for any 1 vport */
bool configured;
};
struct ecore_qm_info {
- struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
- struct init_qm_port_params *qm_port_params;
- u16 start_pq;
- u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
- u16 num_pqs;
- u16 num_vf_pqs;
- u8 num_vports;
- u8 max_phys_tcs_per_port;
- bool pf_rl_en;
- bool pf_wfq_en;
- bool vport_rl_en;
- bool vport_wfq_en;
- u8 pf_wfq;
- u32 pf_rl;
- struct ecore_wfq_data *wfq_data;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u8 pure_lb_pq;
+ u8 offload_pq;
+ u8 pure_ack_pq;
+ u8 ooo_pq;
+ u8 vf_queues_offset;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct ecore_wfq_data *wfq_data;
+ u8 num_pf_rls;
};
struct storm_stats {
@@ -421,9 +448,6 @@ struct storm_stats {
u32 len;
};
-#define CONFIG_ECORE_BINARY_FW
-#define CONFIG_ECORE_ZIPPED_FW
-
struct ecore_fw_data {
#ifdef CONFIG_ECORE_BINARY_FW
struct fw_ver_info *fw_ver_info;
@@ -435,106 +459,119 @@ struct ecore_fw_data {
};
struct ecore_hwfn {
- struct ecore_dev *p_dev;
- u8 my_id; /* ID inside the PF */
+ struct ecore_dev *p_dev;
+ u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
- u8 rel_pf_id; /* Relative to engine */
- u8 abs_pf_id;
-#define ECORE_PATH_ID(_p_hwfn) \
+ u8 rel_pf_id; /* Relative to engine*/
+ u8 abs_pf_id;
+ #define ECORE_PATH_ID(_p_hwfn) \
(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
- u8 port_id;
- bool b_active;
+ u8 port_id;
+ bool b_active;
- u32 dp_module;
- u8 dp_level;
- char name[NAME_SIZE];
- void *dp_ctx;
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
- bool first_on_engine;
- bool hw_init_done;
+ bool first_on_engine;
+ bool hw_init_done;
- u8 num_funcs_on_engine;
+ u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
/* BAR access */
- void OSAL_IOMEM *regview;
- void OSAL_IOMEM *doorbells;
- u64 db_phys_addr;
- unsigned long db_size;
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
/* PTT pool */
- struct ecore_ptt_pool *p_ptt_pool;
+ struct ecore_ptt_pool *p_ptt_pool;
/* HW info */
- struct ecore_hw_info hw_info;
+ struct ecore_hw_info hw_info;
/* rt_array (for init-tool) */
- struct ecore_rt_data rt_data;
+ struct ecore_rt_data rt_data;
/* SPQ */
- struct ecore_spq *p_spq;
+ struct ecore_spq *p_spq;
/* EQ */
- struct ecore_eq *p_eq;
+ struct ecore_eq *p_eq;
- /* Consolidate Q */
- struct ecore_consq *p_consq;
+ /* Consolidate Q*/
+ struct ecore_consq *p_consq;
/* Slow-Path definitions */
- osal_dpc_t sp_dpc;
- bool b_sp_dpc_enabled;
+ osal_dpc_t sp_dpc;
+ bool b_sp_dpc_enabled;
- struct ecore_ptt *p_main_ptt;
- struct ecore_ptt *p_dpc_ptt;
+ struct ecore_ptt *p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt;
- struct ecore_sb_sp_info *p_sp_sb;
- struct ecore_sb_attn_info *p_sb_attn;
+ struct ecore_sb_sp_info *p_sp_sb;
+ struct ecore_sb_attn_info *p_sb_attn;
/* Protocol related */
- struct ecore_ooo_info *p_ooo_info;
- struct ecore_pf_params pf_params;
+ bool using_ll2;
+ struct ecore_ll2_info *p_ll2_info;
+ struct ecore_ooo_info *p_ooo_info;
+ struct ecore_iscsi_info *p_iscsi_info;
+ struct ecore_fcoe_info *p_fcoe_info;
+ struct ecore_rdma_info *p_rdma_info;
+ struct ecore_pf_params pf_params;
+
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
/* Array of sb_info of all status blocks */
- struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
+ struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
- struct ecore_cxt_mngr *p_cxt_mngr;
+ struct ecore_cxt_mngr *p_cxt_mngr;
- /* Flag indicating whether interrupts are enabled or not */
- bool b_int_enabled;
- bool b_int_requested;
+ /* Flag indicating whether interrupts are enabled or not*/
+ bool b_int_enabled;
+ bool b_int_requested;
/* True if the driver requests for the link */
- bool b_drv_link_init;
+ bool b_drv_link_init;
- struct ecore_vf_iov *vf_iov_info;
- struct ecore_pf_iov *pf_iov_info;
- struct ecore_mcp_info *mcp_info;
- struct ecore_dcbx_info *p_dcbx_info;
+ struct ecore_vf_iov *vf_iov_info;
+ struct ecore_pf_iov *pf_iov_info;
+ struct ecore_mcp_info *mcp_info;
+ struct ecore_dcbx_info *p_dcbx_info;
- struct ecore_hw_cid_data *p_tx_cids;
- struct ecore_hw_cid_data *p_rx_cids;
+ struct ecore_hw_cid_data *p_tx_cids;
+ struct ecore_hw_cid_data *p_rx_cids;
- struct ecore_dmae_info dmae_info;
+ struct ecore_dmae_info dmae_info;
/* QM init */
- struct ecore_qm_info qm_info;
+ struct ecore_qm_info qm_info;
- /* Buffer for unzipping firmware data */
#ifdef CONFIG_ECORE_ZIPPED_FW
+ /* Buffer for unzipping firmware data */
void *unzip_buf;
#endif
- struct dbg_tools_data dbg_info;
+ struct dbg_tools_data dbg_info;
- struct z_stream_s *stream;
+ struct z_stream_s *stream;
/* PWM region specific data */
- u32 dpi_size;
- u32 dpi_count;
- u32 dpi_start_offset; /* this is used to
- * calculate th
- * doorbell address
- */
+ u32 dpi_size;
+ u32 dpi_count;
+ u32 dpi_start_offset; /* this is used to
+ * calculate the
+ * doorbell address
+ */
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
};
#ifndef __EXTRACT__LINUX__
@@ -545,136 +582,177 @@ enum ecore_mf_mode {
};
#endif
+/* @DPDK */
+struct ecore_dbg_feature {
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+enum qed_dbg_features {
+ DBG_FEATURE_BUS,
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_NUM
+};
+
struct ecore_dev {
- u32 dp_module;
- u8 dp_level;
- char name[NAME_SIZE];
- void *dp_ctx;
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+ void *dp_ctx;
- u8 type;
+ u8 type;
#define ECORE_DEV_TYPE_BB (0 << 0)
#define ECORE_DEV_TYPE_AH (1 << 0)
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
-#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && \
- CHIP_REV_IS_A0(dev))
-#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && \
- CHIP_REV_IS_B0(dev))
+#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
+#ifndef ASIC_ONLY
+#define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \
+ (CHIP_REV_IS_TEDIBEAR(dev)))
+#else
+#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev))
+#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
-#define ECORE_GET_TYPE(dev) (ECORE_IS_BB_A0(dev) ? CHIP_BB_A0 : \
- ECORE_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
u16 vendor_id;
u16 device_id;
- u16 chip_num;
-#define CHIP_NUM_MASK 0xffff
-#define CHIP_NUM_SHIFT 16
+ u16 chip_num;
+ #define CHIP_NUM_MASK 0xffff
+ #define CHIP_NUM_SHIFT 16
- u16 chip_rev;
-#define CHIP_REV_MASK 0xf
-#define CHIP_REV_SHIFT 12
+ u16 chip_rev;
+ #define CHIP_REV_MASK 0xf
+ #define CHIP_REV_SHIFT 12
#ifndef ASIC_ONLY
-#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
-#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
-#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
-#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
+ #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+ #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+ #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+ #define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_EMUL_B0(_p_dev))
-#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
-#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
-#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+ #define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+ #define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev))
-#define CHIP_REV_IS_SLOW(_p_dev) \
+ #define CHIP_REV_IS_SLOW(_p_dev) \
(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
-#define CHIP_REV_IS_A0(_p_dev) \
+ #define CHIP_REV_IS_A0(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_FPGA_A0(_p_dev) || \
!(_p_dev)->chip_rev)
-#define CHIP_REV_IS_B0(_p_dev) \
+ #define CHIP_REV_IS_B0(_p_dev) \
(CHIP_REV_IS_EMUL_B0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev) || \
(_p_dev)->chip_rev == 1)
-#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev))
+ #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#else
-#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
-#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+ #define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
+ #define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#endif
- u16 chip_metal;
-#define CHIP_METAL_MASK 0xff
-#define CHIP_METAL_SHIFT 4
-
- u16 chip_bond_id;
-#define CHIP_BOND_ID_MASK 0xf
-#define CHIP_BOND_ID_SHIFT 0
-
- u8 num_engines;
- u8 num_ports_in_engines;
- u8 num_funcs_in_port;
-
- u8 path_id;
- enum ecore_mf_mode mf_mode;
-#define IS_MF_DEFAULT(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
-#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
-#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
-
- int pcie_width;
- int pcie_speed;
- u8 ver_str[VER_SIZE];
+ u16 chip_metal;
+ #define CHIP_METAL_MASK 0xff
+ #define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+ #define CHIP_BOND_ID_MASK 0xf
+ #define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports_in_engines;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+ enum ecore_mf_mode mf_mode;
+ #define IS_MF_DEFAULT(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+ #define IS_MF_SI(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+ #define IS_MF_SD(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+
+ int pcie_width;
+ int pcie_speed;
+ u8 ver_str[NAME_SIZE]; /* @DPDK */
/* Add MF related configuration */
- u8 mcp_rev;
- u8 boot_mode;
+ u8 mcp_rev;
+ u8 boot_mode;
- u8 wol;
+ u8 wol;
- u32 int_mode;
- enum ecore_coalescing_mode int_coalescing_mode;
- u8 rx_coalesce_usecs;
- u8 tx_coalesce_usecs;
+ u32 int_mode;
+ enum ecore_coalescing_mode int_coalescing_mode;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
- void OSAL_IOMEM *regview;
- void OSAL_IOMEM *doorbells;
- u64 db_phys_addr;
- unsigned long db_size;
+ void OSAL_IOMEM *regview;
+ void OSAL_IOMEM *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
/* PCI */
- u8 cache_shift;
+ u8 cache_shift;
/* Init */
- const struct iro *iro_arr;
-#define IRO (p_hwfn->p_dev->iro_arr)
+ const struct iro *iro_arr;
+ #define IRO (p_hwfn->p_dev->iro_arr)
/* HW functions */
- u8 num_hwfns;
- struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ u8 num_hwfns;
+ struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
/* SRIOV */
- struct ecore_hw_sriov_info sriov_info;
- unsigned long tunn_mode;
-#define IS_ECORE_SRIOV(edev) (!!((edev)->sriov_info.total_vfs))
- bool b_is_vf;
+ struct ecore_hw_sriov_info *p_iov_info;
+#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
+ bool b_hw_channel;
+
+ unsigned long tunn_mode;
- u32 drv_type;
+ bool b_is_vf;
- struct ecore_eth_stats *reset_stats;
- struct ecore_fw_data *fw_data;
+ u32 drv_type;
- u32 mcp_nvm_resp;
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
+
+ struct ecore_eth_stats *reset_stats;
+ struct ecore_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
/* Recovery */
- bool recov_in_prog;
+ bool recov_in_prog;
+
+/* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
+ /* Indicates whether to allow the MFW to collect a crash dump */
+ bool mdump_en;
+
+ /* Indicates if the reg_fifo is checked after any register access */
+ bool chk_reg_fifo;
#ifndef ASIC_ONLY
- bool b_is_emul_full;
+ bool b_is_emul_full;
#endif
- void *firmware;
-
- u64 fw_len;
+#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
+ void *firmware;
+ u64 fw_len;
+#endif
+ /* @DPDK */
+ struct ecore_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
};
#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
@@ -688,12 +766,14 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
+#ifndef REAL_ASIC_ONLY
#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
(ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
(ECORE_PATH_ID(p_hwfn) == 1) && \
((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
(p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
(p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
+#endif
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
@@ -704,10 +784,10 @@ struct ecore_dev {
* @return OSAL_INLINE u8
*/
static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
- u32 concrete_fid)
+ u32 concrete_fid)
{
- u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
- u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
u8 sw_fid;
@@ -722,18 +802,6 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
#define PURE_LB_TC 8
#define OOO_LB_TC 9
-static OSAL_INLINE u16 ecore_sriov_get_next_vf(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id)
-{
- u16 i;
-
- for (i = rel_vf_id; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
- if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, i))
- return i;
-
- return p_hwfn->p_dev->sriov_info.total_vfs;
-}
-
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
u32 min_pf_rate);
@@ -744,11 +812,6 @@ void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
-#define ecore_for_each_vf(_p_hwfn, _i) \
- for (_i = ecore_sriov_get_next_vf(_p_hwfn, 0); \
- _i < _p_hwfn->p_dev->sriov_info.total_vfs; \
- _i = ecore_sriov_get_next_vf(_p_hwfn, _i + 1))
-
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
#endif /* __ECORE_H */
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index c5734490..9ad1874f 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -118,6 +118,8 @@ struct ecore_chain {
u16 next_page_mask;
struct ecore_chain_pbl pbl;
+
+ void *dp_ctx;
};
#define ECORE_CHAIN_PBL_ENTRY_SIZE (8)
@@ -129,7 +131,7 @@ struct ecore_chain {
(1 + ((sizeof(struct ecore_chain_next) - 1) / \
(elem_size))) : 0)
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
+#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
@@ -183,7 +185,7 @@ static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
(u32)p_chain->u.chain16.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+ p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
return (u16)(p_chain->capacity - used);
}
@@ -196,11 +198,11 @@ ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
OSAL_ASSERT(is_chain_u32(p_chain));
used = (u32)(((u64)ECORE_U32_MAX + 1 +
- (u64)(p_chain->u.chain32.prod_idx)) -
- (u64)p_chain->u.chain32.cons_idx);
+ (u64)(p_chain->u.chain32.prod_idx)) -
+ (u64)p_chain->u.chain32.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
- p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+ p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
return p_chain->capacity - used;
}
@@ -307,21 +309,23 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx(p, idx) \
- ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
- (p)->usable_per_page)
+ ((((p)->u.chain16.idx + 1) & \
+ (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx_u32(p, idx) \
- ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \
- == (p)->usable_per_page)
+ ((((p)->u.chain32.idx + 1) & \
+ (p)->elem_per_page_mask) == (p)->usable_per_page)
#define test_and_skip(p, idx) \
do { \
if (is_chain_u16(p)) { \
if (is_unusable_idx(p, idx)) \
- (p)->u.chain16.idx += (p)->elem_unusable; \
+ (p)->u.chain16.idx += \
+ (p)->elem_unusable; \
} else { \
if (is_unusable_idx_u32(p, idx)) \
- (p)->u.chain32.idx += (p)->elem_unusable; \
+ (p)->u.chain32.idx += \
+ (p)->elem_unusable; \
} \
} while (0)
@@ -518,14 +522,14 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
switch (p_chain->intended_use) {
case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
case ECORE_CHAIN_USE_TO_PRODUCE:
- /* Do nothing */
- break;
+ /* Do nothing */
+ break;
case ECORE_CHAIN_USE_TO_CONSUME:
- /* produce empty elements */
- for (i = 0; i < p_chain->capacity; i++)
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
ecore_chain_recycle_consumed(p_chain);
- break;
+ break;
}
}
@@ -540,12 +544,13 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
* @param intended_use
* @param mode
* @param cnt_type
+ * @param dp_ctx
*/
static OSAL_INLINE void
ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
enum ecore_chain_use_mode intended_use,
enum ecore_chain_mode mode,
- enum ecore_chain_cnt_type cnt_type)
+ enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
{
/* chain fixed parameters */
p_chain->p_virt_addr = OSAL_NULL;
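Existing callers gain one trailing argument with this change; a hedged sketch of the updated call site (the enum values are the existing ecore_chain ones, and passing the ecore_dev as dp_ctx is an assumption about the intended usage):

static void chain_init_example(struct ecore_dev *p_dev,
			       struct ecore_chain *p_chain,
			       u32 page_cnt, u8 elem_size)
{
	ecore_chain_init_params(p_chain, page_cnt, elem_size,
				ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
				ECORE_CHAIN_MODE_PBL,
				ECORE_CHAIN_CNT_TYPE_U16,
				p_dev /* dp_ctx, consumed by ecore_chain_print() */);
}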
@@ -569,6 +574,8 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->pbl.p_phys_table = 0;
p_chain->pbl.p_virt_table = OSAL_NULL;
p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
+
+ p_chain->dp_ctx = dp_ctx;
}
/**
@@ -721,4 +728,14 @@ static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
ECORE_CHAIN_PAGE_SIZE);
}
+int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
+ u32 buffer_size, u32 *element_indx, u32 stop_indx,
+ bool print_metadata,
+ int (*func_ptr_print_element)(struct ecore_chain *p_chain,
+ void *p_element,
+ char *buffer),
+ int (*func_ptr_print_metadata)(struct ecore_chain
+ *p_chain,
+ char *buffer));
+
#endif /* __ECORE_CHAIN_H__ */
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 1201c1a9..3dd953d9 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -18,6 +18,7 @@
#include "ecore_cxt.h"
#include "ecore_hw.h"
#include "ecore_dev_api.h"
+#include "ecore_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
@@ -60,6 +61,14 @@ union conn_context {
struct eth_conn_context eth_ctx;
};
+/* TYPE-0 task context - iSCSI, FCOE */
+union type0_task_context {
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+};
+
struct src_ent {
u8 opaque[56];
u64 next;
@@ -71,6 +80,14 @@ struct src_ent {
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+#define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
/* PF per protocol configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
@@ -96,6 +113,7 @@ struct ecore_conn_type_cfg {
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
+#define SRQ_BLK (0)
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
@@ -105,6 +123,7 @@ enum ilt_clients {
ILT_CLI_QM,
ILT_CLI_TM,
ILT_CLI_SRC,
+ ILT_CLI_TSDM,
ILT_CLI_MAX
};
@@ -172,6 +191,9 @@ struct ecore_cxt_mngr {
*/
u32 vf_count;
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
/* Acquired CIDs */
struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -179,6 +201,9 @@ struct ecore_cxt_mngr {
struct ecore_dma_mem *ilt_shadow;
u32 pf_start_line;
+ /* Mutex for a dynamic ILT allocation */
+ osal_mutex_t mutex;
+
/* SRC T2 */
struct ecore_dma_mem *t2;
u32 t2_num_pages;
@@ -197,6 +222,11 @@ static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
return type == PROTOCOLID_TOE;
}
+static bool tm_tid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_FCOE;
+}
+
/* counts the iids for the CDU/CDUC ILT client configuration */
struct ecore_cdu_iids {
u32 pf_cids;
@@ -255,6 +285,22 @@ static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
iids->pf_cids += p_cfg->cid_count;
iids->per_vf_cids += p_cfg->cids_per_vf;
}
+
+ if (tm_tid_proto(i)) {
+ struct ecore_tid_seg *segs = p_cfg->tid_seg;
+
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->pf_tids[j] += segs[j].count;
+
+ /* The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
}
iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
@@ -317,7 +363,7 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
}
/* set the iids (cid/tid) count per protocol */
-void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
+static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 cid_count, u32 vf_cid_cnt)
{
@@ -343,7 +389,7 @@ u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
}
-static u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type)
{
u32 cnt = 0;
@@ -378,7 +424,7 @@ static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
{
u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
- /* verfiy called once for each block */
+ /* verify that it's called once for each block */
if (p_blk->total_size)
return;
@@ -405,7 +451,8 @@ static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
p_cli->last.val = *p_line - 1;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
+ " [Real %08x] Start line %d\n",
client_id, p_cli->first.val, p_cli->last.val,
p_blk->total_size, p_blk->real_size_in_page,
p_blk->start_line);
@@ -453,7 +500,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
/* CDUC */
@@ -670,12 +717,27 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
- p_cli->pf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++) {
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
}
+
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = ecore_cxt_get_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
}
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
@@ -787,7 +849,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
val = 0;
entries[j].next = OSAL_CPU_TO_BE64(val);
- conn_num -= ent_per_page;
+ conn_num -= ent_num;
}
return ECORE_SUCCESS;
@@ -797,18 +859,22 @@ t2_fail:
return rc;
}
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
+
+
/* Total number of ILT lines used by this PF */
static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
{
u32 size = 0;
u32 i;
- for (i = 0; i < ILT_CLI_MAX; i++)
- if (!ilt_clients[i].active)
- continue;
- else
- size += (ilt_clients[i].last.val -
- ilt_clients[i].first.val + 1);
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val -
+ ilt_clients[i].first.val + 1);
return size;
}
@@ -842,7 +908,7 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
u32 lines, line, sz_left, lines_to_skip = 0;
/* Special handling for RoCE that supports dynamic allocation */
- if (ilt_client == ILT_CLI_CDUT)
+ if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
return ECORE_SUCCESS;
lines_to_skip = p_blk->dynamic_line_cnt;
@@ -876,9 +942,9 @@ ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
ilt_shadow[line].size = size;
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
- "ILT shadow: Line [%d] Physical 0x%" PRIx64
+ "ILT shadow: Line [%d] Physical 0x%lx"
" Virtual %p Size %d\n",
- line, (u64)p_phys, p_virt, size);
+ line, (unsigned long)p_phys, p_virt, size);
sz_left -= size;
line++;
@@ -892,15 +958,16 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_ilt_client_cfg *clients = p_mngr->clients;
struct ecore_ilt_cli_blk *p_blk;
- enum _ecore_status_t rc;
u32 size, i, j, k;
+ enum _ecore_status_t rc;
size = ecore_cxt_ilt_shadow_size(clients);
p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
size * sizeof(struct ecore_dma_mem));
if (!p_mngr->ilt_shadow) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table");
+ DP_NOTICE(p_hwfn, true,
+ "Failed to allocate ilt shadow table\n");
rc = ECORE_NOMEM;
goto ilt_shadow_fail;
}
@@ -909,10 +976,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
"Allocated 0x%x bytes for ilt shadow\n",
(u32)(size * sizeof(struct ecore_dma_mem)));
- for (i = 0; i < ILT_CLI_MAX; i++)
- if (!clients[i].active) {
- continue;
- } else {
+ for_each_ilt_valid_client(i, clients) {
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
@@ -991,6 +1055,7 @@ cid_map_fail:
enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_ilt_client_cfg *clients;
struct ecore_cxt_mngr *p_mngr;
u32 i;
@@ -1002,35 +1067,48 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
}
/* Initialize ILT client registers */
- p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
- p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
- p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
- p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
- p_mngr->clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
- p_mngr->clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
- p_mngr->clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
- p_mngr->clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
- p_mngr->clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
- p_mngr->clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
- p_mngr->clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
- p_mngr->clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
- p_mngr->clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 32K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
- /* Initialize task sizes */
- p_mngr->task_type_size[0] = 512; /* @DPDK */
- p_mngr->task_type_size[1] = 128; /* @DPDK */
+ /* due to removal of ISCSI/FCoE files union type0_task_context
+ * task_type_size will be 0. So hardcoded for now.
+ */
+ p_mngr->task_type_size[0] = 512; /* @DPDK */
+ p_mngr->task_type_size[1] = 128; /* @DPDK */
+
+ if (p_hwfn->p_dev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ /* Initialize the dynamic ILT allocation mutex */
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+ OSAL_MUTEX_INIT(&p_mngr->mutex);
- p_mngr->vf_count = p_hwfn->p_dev->sriov_info.total_vfs;
/* Set the cxt mngr pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -1077,6 +1155,7 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
+ OSAL_MUTEX_DEALLOC(&p_mngr->mutex);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
p_hwfn->p_cxt_mngr = OSAL_NULL;
@@ -1362,10 +1441,7 @@ static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
int i;
ilt_clients = p_hwfn->p_cxt_mngr->clients;
- for (i = 0; i < ILT_CLI_MAX; i++)
- if (!ilt_clients[i].active) {
- continue;
- } else {
+ for_each_ilt_valid_client(i, ilt_clients) {
STORE_RT_REG(p_hwfn,
ilt_clients[i].first.reg,
ilt_clients[i].first.val);
@@ -1383,13 +1459,16 @@ static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
u32 blk_factor;
/* For simplicity we set the 'block' to be an ILT page */
- STORE_RT_REG(p_hwfn,
- PSWRQ2_REG_VF_BASE_RT_OFFSET,
- p_hwfn->hw_info.first_vf_in_pf);
- STORE_RT_REG(p_hwfn,
- PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
- p_hwfn->hw_info.first_vf_in_pf +
- p_hwfn->p_dev->sriov_info.total_vfs);
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
@@ -1448,10 +1527,7 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
p_shdw = p_mngr->ilt_shadow;
clients = p_hwfn->p_cxt_mngr->clients;
- for (i = 0; i < ILT_CLI_MAX; i++)
- if (!clients[i].active) {
- continue;
- } else {
+ for_each_ilt_valid_client(i, clients) {
/* Client's 1st val and RT array are absolute, ILT shadows'
* lines are relative.
*/
@@ -1474,9 +1550,10 @@ static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
"Setting RT[0x%08x] from"
" ILT[0x%08x] [Client is %d] to"
- " Physical addr: 0x%" PRIx64 "\n",
+ " Physical addr: 0x%lx\n",
rt_offst, line, i,
- (u64)(p_shdw[line].p_phys >> 12));
+ (unsigned long)(p_shdw[line].
+ p_phys >> 12));
}
STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
@@ -1545,11 +1622,11 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
- SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
(sizeof(cfg_word) / sizeof(u32)) *
- (p_hwfn->hw_info.first_vf_in_pf + i);
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
}
@@ -1557,7 +1634,7 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
- SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
(sizeof(cfg_word) / sizeof(u32)) *
@@ -1583,7 +1660,7 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
(sizeof(cfg_word) / sizeof(u32)) *
- (p_hwfn->hw_info.first_vf_in_pf + i);
+ (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
}
@@ -1613,15 +1690,26 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
/* @@@TBD how to enable the scan for the VFs */
}
-static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn)
+static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+ struct ecore_tid_seg *p_tid;
+
+ /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
+ if (!p_fcoe->cid_count)
+ return;
+
+ p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+ p_tid->count);
}
void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
{
/* CDU configuration */
ecore_cdu_init_common(p_hwfn);
- ecore_prs_init_common(p_hwfn);
}
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
@@ -1633,6 +1721,7 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
ecore_ilt_init_pf(p_hwfn);
ecore_src_init_pf(p_hwfn);
ecore_tm_init_pf(p_hwfn);
+ ecore_prs_init_pf(p_hwfn);
}
enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
@@ -1650,7 +1739,7 @@ enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
p_mngr->acquired[type].max_count);
if (rel_cid >= p_mngr->acquired[type].max_count) {
- DP_NOTICE(p_hwfn, false, "no CID available for protocol %d",
+ DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
type);
return ECORE_NORESOURCES;
}
@@ -1750,6 +1839,20 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
+}
+
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
{
/* Set the number of required CORE connections */
@@ -1820,13 +1923,130 @@ enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
/* This function is very RoCE oriented, if another protocol in the future
* will want this feature we'll need to modify the function to be more generic
*/
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct ecore_ilt_client_cfg *p_cli;
+ struct ecore_ilt_cli_blk *p_blk;
+ struct ecore_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case ECORE_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ /* If line is already allocated, do nothing, otherwise allocate it and
+ * write it to the PSWRQ2 registers.
+ * This section can be run in parallel from different contexts and thus
+ * a mutex protection is needed.
+ */
+
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ goto out0;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = ECORE_TIMEOUT;
+ goto out0;
+ }
+
+ p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_phys,
+ p_blk->real_size_in_page);
+ if (!p_virt) {
+ rc = ECORE_NOMEM;
+ goto out1;
+ }
+ OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry,
+ ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+/* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
+ 0 /* no flags */);
+
+ if (elem_type == ECORE_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RoCE search */
+ ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ ecore_ptt_release(p_hwfn, p_ptt);
+out0:
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
+
+/* This function is very RoCE oriented, if another protocol in the future
+ * will want this feature we'll need to modify the function to be more generic
+ */
static enum _ecore_status_t
ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
enum ecore_cxt_elem_type elem_type,
u32 start_iid, u32 count)
{
- u32 reg_offset, elem_size, hw_p_size, elems_per_p;
u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
struct ecore_ilt_client_cfg *p_cli;
struct ecore_ilt_cli_blk *p_blk;
u32 end_iid = start_iid + count;
@@ -1834,10 +2054,26 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
u64 ilt_hw_entry = 0;
u32 i;
- if (elem_type == ECORE_ELEM_CXT) {
+ switch (elem_type) {
+ case ECORE_ELEM_CXT:
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
elem_size = CONN_CXT_SIZE(p_hwfn);
p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case ECORE_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case ECORE_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "ECORE_INVALID elem type = %d", elem_type);
+ return ECORE_INVAL;
}
/* Calculate line in ilt */
@@ -1876,9 +2112,14 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
((start_line++) * ILT_REG_SIZE_IN_BYTES *
ILT_ENTRY_IN_REGS);
- ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry));
- ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES,
- U64_HI(ilt_hw_entry));
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus.
+ */
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ 0 /* no flags */);
}
ecore_ptt_release(p_hwfn, p_ptt);
@@ -1907,6 +2148,12 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
ecore_cxt_get_proto_tid_count(p_hwfn,
proto));
+ if (rc)
+ return rc;
+
+ /* Free TSDM CXT */
+ rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
+ ecore_cxt_get_srq_count(p_hwfn));
return rc;
}
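
For reference, the dynamic-allocation path added above (ecore_cxt_dynamic_ilt_alloc) maps an element id to an ILT line by dividing the ILT page size by the element size, derives the PF-relative shadow index, and then programs a 64-bit ILT entry holding a valid bit and the physical address in 4KB units. A rough standalone sketch of that arithmetic; the constants and the valid-bit position are illustrative only (the real layout comes from the HSI definitions, which are not part of this diff):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical page/element sizes and valid-bit position, for illustration. */
#define ILT_PAGE_BYTES	(32 * 1024)	/* one ILT "page" */
#define ELEM_SIZE	128		/* e.g. an SRQ context */
#define ENTRY_VALID_BIT	52ULL		/* placeholder bit position */

struct line_calc {
	uint32_t line;		/* absolute ILT line */
	uint32_t shadow_line;	/* index into this PF's shadow array */
};

static struct line_calc iid_to_line(uint32_t start_line, uint32_t pf_start_line,
				    uint32_t iid)
{
	uint32_t elems_per_page = ILT_PAGE_BYTES / ELEM_SIZE;
	struct line_calc res;

	res.line = start_line + iid / elems_per_page;
	res.shadow_line = res.line - pf_start_line;
	return res;
}

/* Pack a 64-bit ILT entry: the physical address is stored >> 12 (4KB units). */
static uint64_t make_ilt_entry(uint64_t phys_addr)
{
	return (phys_addr >> 12) | (1ULL << ENTRY_VALID_BIT);
}

int main(void)
{
	/* 32K / 128 = 256 elements per page, so iid 1000 lands on line 100 + 3. */
	struct line_calc c = iid_to_line(100, 96, 1000);

	printf("line %u, shadow line %u\n", c.line, c.shadow_line);
	printf("entry 0x%llx\n",
	       (unsigned long long)make_ilt_entry(0x123456000ULL));
	return 0;
}
```

In the driver the resulting entry is then written through DMAE rather than plain register writes, since the ILT memory line is a wide-bus target.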
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 1ac95f98..5379d7bc 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -13,24 +13,38 @@
#include "ecore_proto_if.h"
#include "ecore_cxt_api.h"
+/* Tasks segments definitions */
+#define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */
+#define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */
+#define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */
+
enum ecore_cxt_elem_type {
ECORE_ELEM_CXT,
+ ECORE_ELEM_SRQ,
ECORE_ELEM_TASK
};
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type type, u32 *vf_cid);
+ enum protocol_type type,
+ u32 *vf_cid);
+
+u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type);
u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
+#ifndef LINUX_REMOVE
/**
* @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
*
* @param p_hwfn
* @param iids [out], a structure holding all the counters
*/
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids);
+void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_iids *iids);
+#endif
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
@@ -42,18 +56,6 @@ void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids);
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_cxt_set_proto_cid_count - Set the max cids per protocol for cxt
- * init
- *
- * @param p_hwfn
- * @param type
- * @param cid_cnt - number of pf cids
- * @param vf_cid_cnt - number of vf cids
- */
-void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
- enum protocol_type type,
- u32 cid_cnt, u32 vf_cid_cnt);
-/**
* @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
@@ -134,7 +136,24 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param cid
*/
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+ u32 cid);
+
+/**
+ * @brief ecore_cxt_dynamic_ilt_alloc - function checks if the
+ *        page containing the iid in the ilt is already
+ *        allocated; if it is not, it allocates the page.
+ *
+ * @param p_hwfn
+ * @param elem_type
+ * @param iid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
+ enum ecore_cxt_elem_type elem_type,
+ u32 iid);
/**
* @brief ecore_cxt_free_proto_ilt - function frees ilt pages
@@ -152,6 +171,7 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
#define ECORE_CTX_FL_MEM 1
enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u32 tid,
- u8 ctx_type, void **task_ctx);
+ u8 ctx_type,
+ void **task_ctx);
#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index d98dddb2..6a50412a 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -12,9 +12,9 @@
struct ecore_hwfn;
struct ecore_cxt_info {
- void *p_cxt;
- u32 iid;
- enum protocol_type type;
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
};
#define MAX_TID_BLOCKS 512
@@ -22,24 +22,9 @@ struct ecore_tid_mem {
u32 tid_size;
u32 num_tids_per_block;
u32 waste;
- u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
};
-static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid)
-{
- /* note: waste is superfluous */
- return (void *)(info->blocks[tid / info->num_tids_per_block] +
- (tid % info->num_tids_per_block) * info->tid_size);
-
- /* more elaborate alternative with no modulo
- * u32 mask = info->tid_size * info->num_tids_per_block +
- * info->waste - 1;
- * u32 index = tid / info->num_tids_per_block;
- * u32 offset = tid * info->tid_size + index * info->waste;
- * return (void *)(blocks[index] + (offset & mask));
- */
-}
-
/**
* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
*
@@ -49,7 +34,7 @@ static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid)
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid);
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 6a966cb9..8175619a 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -15,7 +15,6 @@
#include "ecore_iro.h"
#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
-#define ECORE_MAX_PFC_PRIORITIES 8
#define ECORE_ETH_TYPE_DEFAULT (0)
#define ECORE_DCBX_INVALID_PRIORITY 0xFF
@@ -24,7 +23,7 @@
* the traffic class corresponding to the priority.
*/
#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \
- ((u32)(pri_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
@@ -38,6 +37,18 @@ static bool ecore_dcbx_app_port(u32 app_info_bitmap)
DCBX_APP_SF_PORT) ? true : false;
}
+static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+ u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_port(app_info_bitmap);
+
+ return (mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT) ?
+ true : false;
+}
+
static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
{
return (ecore_dcbx_app_ethtype(app_info_bitmap) &&
@@ -62,6 +73,12 @@ static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap)
DCBX_CONFIG_VERSION_IEEE) ? true : false;
}
+static bool ecore_dcbx_local(u32 dcbx_cfg_bitmap)
+{
+ return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC) ? true : false;
+}
+
/* @@@TBD A0 Eagle workaround */
void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool set_to_pfc)
@@ -83,8 +100,8 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
{
struct ecore_hw_info *p_info = &p_hwfn->hw_info;
enum dcbx_protocol_type id;
- bool enable, update;
- u8 prio, tc, size;
+ u8 prio, tc, size, update;
+ bool enable;
const char *name; /* @DPDK */
int i;
@@ -102,8 +119,10 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
prio = p_data->arr[id].priority;
DP_INFO(p_hwfn,
- "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
- name, update, enable, prio, tc, p_info->num_tc);
+ "%s info: update %d, enable %d, prio %d, tc %d,"
+ " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ name, update, enable, prio, tc, p_info->num_active_tc,
+ p_data->arr[id].dscp_enable, p_data->arr[id].dscp_val);
}
}
@@ -112,28 +131,42 @@ ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
u8 tc, enum ecore_pci_personality personality)
{
/* QM reconf data */
- if (p_info->personality == personality) {
- if (personality == ECORE_PCI_ETH)
- p_info->non_offload_tc = tc;
- else
- p_info->offload_tc = tc;
- }
+ if (p_info->personality == personality)
+ p_info->offload_tc = tc;
}
void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
- struct ecore_hw_info *p_info,
+ struct ecore_hwfn *p_hwfn,
bool enable, bool update, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
+ struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
+
/* PF update ramrod data */
- p_data->arr[type].update = update;
p_data->arr[type].enable = enable;
p_data->arr[type].priority = prio;
p_data->arr[type].tc = tc;
+ p_data->arr[type].dscp_enable = dscp->enabled;
+ if (p_data->arr[type].dscp_enable) {
+ u8 i;
+
+ for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
+ if (prio == dscp->dscp_pri_map[i]) {
+ p_data->arr[type].dscp_val = i;
+ break;
+ }
+ }
+
+ if (enable && p_data->arr[type].dscp_enable)
+ p_data->arr[type].update = UPDATE_DCB_DSCP;
+ else if (enable)
+ p_data->arr[type].update = UPDATE_DCB;
+ else
+ p_data->arr[type].update = DONT_UPDATE_DCB_DHCP;
- ecore_dcbx_set_pf_tcs(p_info, tc, personality);
+ ecore_dcbx_set_pf_tcs(&p_hwfn->hw_info, tc, personality);
}
/* Update app protocol data and hw_info fields with the TLV info */
@@ -143,7 +176,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
bool enable, bool update, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
- struct ecore_hw_info *p_info = &p_hwfn->hw_info;
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
const char *name; /* @DPDK */
@@ -161,7 +193,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
name = ecore_dcbx_app_update[i].name;
- ecore_dcbx_set_params(p_data, p_info, enable, update,
+ ecore_dcbx_set_params(p_data, p_hwfn, enable, update,
prio, tc, type, personality);
}
}
@@ -197,7 +229,8 @@ ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority)
static bool
ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
- u32 app_prio_bitmap, u16 id, int *type)
+ u32 app_prio_bitmap, u16 id,
+ enum dcbx_protocol_type *type, bool ieee)
{
bool status = false;
@@ -205,7 +238,11 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
*type = DCBX_PROTOCOL_ETH;
status = true;
} else {
- DP_ERR(p_hwfn, "Unsupported protocol %d\n", id);
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_ERR(p_hwfn,
+ "No action required, App TLV id = 0x%x"
+ " app_prio_bitmap = 0x%x\n",
+ id, app_prio_bitmap);
}
return status;
@@ -218,16 +255,18 @@ static enum _ecore_status_t
ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
- int count, bool dcbx_enabled)
+ int count, u8 dcbx_version)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 tc, priority, priority_map;
- int i, type = -1;
+ enum dcbx_protocol_type type;
+ bool enable, ieee;
u16 protocol_id;
- bool enable;
+ int i;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count);
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
@@ -242,7 +281,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
- protocol_id, &type)) {
+ protocol_id, &type,
+ ieee)) {
/* ETH always have the enable bit reset, as it gets
* vlan information per packet. For other protocols,
* should be set according to the dcbx_enabled
@@ -267,7 +307,7 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
priority, tc, type);
}
@@ -288,14 +328,11 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
struct dcbx_ets_feature *p_ets;
struct ecore_hw_info *p_info;
u32 pri_tc_tbl, flags;
- bool dcbx_enabled;
+ u8 dcbx_version;
int num_entries;
- /* If DCBx version is non zero, then negotiation was
- * successfuly performed
- */
flags = p_hwfn->p_dcbx_info->operational.flags;
- dcbx_enabled = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) != 0;
+ dcbx_version = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@@ -307,13 +344,14 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
- num_entries, dcbx_enabled);
+ num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
- p_info->num_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
data.pf_id = p_hwfn->rel_pf_id;
- data.dcbx_enabled = dcbx_enabled;
+ data.dcbx_enabled = !!dcbx_version;
ecore_dcbx_dp_protocol(p_hwfn, &data);
@@ -348,14 +386,16 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
read_count++;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ "mib type = %d, try count = %d prefix seq num ="
+ " %d suffix seq num = %d\n",
type, read_count, prefix_seq_num, suffix_seq_num);
} while ((prefix_seq_num != suffix_seq_num) &&
(read_count < ECORE_DCBX_MAX_MIB_READ_TRY));
if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) {
DP_ERR(p_hwfn,
- "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ "MIB read err, mib type = %d, try count ="
+ " %d prefix seq num = %d suffix seq num = %d\n",
type, read_count, prefix_seq_num, suffix_seq_num);
rc = ECORE_IO;
}
@@ -384,36 +424,92 @@ static void
ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
- struct ecore_dcbx_params *p_params)
+ struct ecore_dcbx_params *p_params, bool ieee)
{
+ struct ecore_app_entry *entry;
+ u8 pri_map;
int i;
p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
DCBX_APP_WILLING);
p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
DCBX_APP_ENABLED);
+ p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_ENABLED);
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
- p_params->app_bitmap[i] = p_tbl[i].entry;
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT :
+ ECORE_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = ECORE_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee =
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
+ pri_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ ecore_dcbx_get_app_priority(pri_map, &entry->prio);
+ entry->proto_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type, ieee);
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "APP params: willing %d, valid %d\n",
- p_params->app_willing, p_params->app_valid);
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
}
static void
ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
u32 pfc, struct ecore_dcbx_params *p_params)
{
- p_params->pfc_willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
- p_params->max_pfc_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
- p_params->pfc_enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
- p_params->pfc_bitmap = pfc;
+ u8 pfc_map;
+
+ p_params->pfc.willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc_willing, p_params->pfc_bitmap);
+ p_params->pfc.willing, pfc_map);
}
static void
@@ -421,28 +517,34 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
struct dcbx_ets_feature *p_ets,
struct ecore_dcbx_params *p_params)
{
+ u32 bw_map[2], tsa_map[2], pri_map;
int i;
p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_WILLING);
p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_ENABLED);
+ p_params->ets_cbs = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
- p_params->ets_pri_tc_tbl[0] = p_ets->pri_tc_tbl[0];
-
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "ETS params: willing %d, pri_tc_tbl_0 %x max_ets_tc %d\n",
- p_params->ets_willing, p_params->ets_pri_tc_tbl[0],
- p_params->max_ets_tc);
+ "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x"
+ " max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_cbs,
+ p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
* encoded in a type u32 array of size 2.
*/
- for (i = 0; i < 2; i++) {
- p_params->ets_tc_tsa_tbl[i] = p_ets->tc_tsa_tbl[i];
- p_params->ets_tc_bw_tbl[i] = p_ets->tc_bw_tbl[i];
-
+ bw_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
+ pri_map = OSAL_BE32_TO_CPU(p_ets->pri_tc_tbl[0]);
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = ECORE_DCBX_PRIO2TC(pri_map, i);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"elem %d bw_tbl %x tsa_tbl %x\n",
i, p_params->ets_tc_bw_tbl[i],
@@ -455,9 +557,10 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
struct dcbx_ets_feature *p_ets,
- u32 pfc, struct ecore_dcbx_params *p_params)
+ u32 pfc, struct ecore_dcbx_params *p_params,
+ bool ieee)
{
- ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+ ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
@@ -483,7 +586,8 @@ ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets;
pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc;
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+ ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ false);
p_local->valid = true;
return ECORE_SUCCESS;
@@ -508,7 +612,8 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
p_ets = &p_hwfn->p_dcbx_info->remote.features.ets;
pfc = p_hwfn->p_dcbx_info->remote.features.pfc;
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+ ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ false);
p_remote->valid = true;
return ECORE_SUCCESS;
@@ -551,12 +656,15 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_operational->ieee = ecore_dcbx_ieee(flags);
p_operational->cee = ecore_dcbx_cee(flags);
+ p_operational->local = ecore_dcbx_local(flags);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "Version support: ieee %d, cee %d\n",
- p_operational->ieee, p_operational->cee);
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data);
+ ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
@@ -568,6 +676,35 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
+ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_dscp_params *p_dscp;
+ struct dcb_dscp_map *p_dscp_map;
+ int i, j, entry;
+ u32 pri_map;
+
+ p_dscp = &params->dscp;
+ p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ p_dscp->enabled = ECORE_MFW_GET_FIELD(p_dscp_map->flags,
+ DCB_DSCP_ENABLE);
+ /* MFW encodes 64 dscp entries into 8 element array of u32 entries,
+ * where each entry holds the 4bit priority map for 8 dscp entries.
+ */
+ for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
+ pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
+ entry, pri_map);
+ for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
+ p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
+ (j * 4)) & 0xf;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
@@ -668,6 +805,7 @@ ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
lldp_status_params);
data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
@@ -685,6 +823,7 @@ ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_mib_meta_data data;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, operational_dcbx_mib);
data.mib = &p_hwfn->p_dcbx_info->operational;
@@ -702,6 +841,7 @@ ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_mib_meta_data data;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, remote_dcbx_mib);
data.mib = &p_hwfn->p_dcbx_info->remote;
@@ -727,6 +867,18 @@ ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
return rc;
}
+static void
+ecore_dcbx_read_dscp_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, dcb_dscp_map);
+ data.dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ data.size = sizeof(struct dcb_dscp_map);
+ ecore_memcpy_from(p_hwfn, p_ptt, data.dscp_map, data.addr, data.size);
+}
+
static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
@@ -735,6 +887,7 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
switch (type) {
case ECORE_DCBX_OPERATIONAL_MIB:
+ ecore_dcbx_read_dscp_mib(p_hwfn, p_ptt);
rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
break;
case ECORE_DCBX_REMOTE_MIB:
@@ -773,6 +926,9 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
+ ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
+ &p_hwfn->p_dcbx_info->get);
+
rc = ecore_dcbx_process_mib_info(p_hwfn);
if (!rc) {
bool enabled;
@@ -793,6 +949,14 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
}
ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+
+ /* Update the DSCP to TC mapping bit if required */
+ if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
+ p_hwfn->p_dcbx_info->dscp_nig_update) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, 0x1);
+ p_hwfn->p_dcbx_info->dscp_nig_update = false;
+ }
+
OSAL_DCBX_AEN(p_hwfn, type);
return rc;
@@ -826,6 +990,8 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_enable_flag = p_src->arr[type].enable;
p_data->dcb_priority = p_src->arr[type].priority;
p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dscp_enable_flag = p_src->arr[type].dscp_enable;
+ p_data->dscp_val = p_src->arr[type].dscp_val;
}
/* Set pf update ramrod command params */
@@ -833,7 +999,7 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag;
+ bool update_flag = false;
p_dest->pf_id = p_src->pf_id;
@@ -885,3 +1051,302 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
return rc;
}
+
+static void
+ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
+ u32 *pfc, struct ecore_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= (0x1 << i);
+
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct ecore_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
+ for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ p_ets->pri_tc_tbl[0] |= (((u32)p_params->ets_pri_tc_tbl[i]) <<
+ ((7 - i) * 4));
+ }
+ p_ets->pri_tc_tbl[0] = OSAL_CPU_TO_BE32(p_ets->pri_tc_tbl[0]);
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
+ }
+}
+
+static void
+ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct ecore_dcbx_params *p_params, bool ieee)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ if (ieee) {
+ *entry &= ~DCBX_APP_SF_IEEE_MASK;
+ switch (p_params->app_entry[i].sf_ieee) {
+ case ECORE_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ case ECORE_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ case ECORE_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ break;
+ case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT;
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static enum _ecore_status_t
+ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct ecore_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ OSAL_MEMCPY(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(struct dcbx_features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
+ ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_ETS_CFG)
+ ecore_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
+ ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct dcb_dscp_map *p_dscp_map,
+ struct ecore_dcbx_set *p_params)
+{
+ int entry, i, j;
+ u32 val;
+
+ OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
+ sizeof(*p_dscp_map));
+
+ if (p_params->dscp.enabled)
+ p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
+ else
+ p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
+
+ for (i = 0, entry = 0; i < 8; i++) {
+ val = 0;
+ for (j = 0; j < 8; j++, entry++)
+ val |= (((u32)p_params->dscp.dscp_pri_map[entry]) <<
+ (j * 4));
+
+ p_dscp_map->dscp_pri_map[i] = OSAL_CPU_TO_BE32(val);
+ }
+
+ p_hwfn->p_dcbx_info->dscp_nig_update = true;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_set *params,
+ bool hw_commit)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_dcbx_mib_meta_data data;
+ struct dcbx_local_params local_admin;
+ struct dcb_dscp_map dscp_map;
+ u32 resp = 0, param = 0;
+
+ if (!hw_commit) {
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct ecore_dcbx_set));
+ return ECORE_SUCCESS;
+ }
+
+	/* clear set-params cache */
+ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+ sizeof(struct ecore_dcbx_set));
+
+ OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
+ ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ if (params->override_flags & ECORE_DCBX_OVERRIDE_DSCP_CFG) {
+ OSAL_MEMSET(&dscp_map, 0, sizeof(dscp_map));
+ ecore_dcbx_set_dscp_params(p_hwfn, &dscp_map, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, dcb_dscp_map);
+ data.dscp_map = &dscp_map;
+ data.size = sizeof(struct dcb_dscp_map);
+ ecore_memcpy_to(p_hwfn, p_ptt, data.addr, data.dscp_map,
+ data.size);
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send DCBX update request\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_set *params)
+{
+ struct ecore_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct ecore_dcbx_set));
+ return ECORE_SUCCESS;
+ }
+
+ dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_dcbx_get));
+ if (!dcbx_info) {
+ DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
+ return ECORE_NOMEM;
+ }
+
+ rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ OSAL_FREE(p_hwfn->p_dev, dcbx_info);
+ return rc;
+ }
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(struct ecore_dcbx_admin_params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct ecore_dcbx_set));
+
+ OSAL_FREE(p_hwfn->p_dev, dcbx_info);
+
+ return ECORE_SUCCESS;
+}
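
For reference, the DSCP support added to this file keeps the 64-entry DSCP-to-priority map packed into eight u32 words, four bits per entry (eight entries per word); the driver additionally byte-swaps the words with OSAL_CPU_TO_BE32/OSAL_BE32_TO_CPU when exchanging them with the MFW. A minimal round-trip sketch of just the 4-bit packing, with hypothetical names and the endian conversion omitted:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DSCP_SIZE 64

/* Pack 64 4-bit priorities into 8 words, 8 entries per word. */
static void dscp_pack(const uint8_t pri[DSCP_SIZE], uint32_t words[DSCP_SIZE / 8])
{
	int i, j, entry;

	for (i = 0, entry = 0; i < DSCP_SIZE / 8; i++) {
		uint32_t val = 0;

		for (j = 0; j < 8; j++, entry++)
			val |= (uint32_t)(pri[entry] & 0xf) << (j * 4);
		words[i] = val;
	}
}

/* Unpack the 8 words back into 64 per-DSCP priorities. */
static void dscp_unpack(const uint32_t words[DSCP_SIZE / 8], uint8_t pri[DSCP_SIZE])
{
	int i, j, entry;

	for (i = 0, entry = 0; i < DSCP_SIZE / 8; i++)
		for (j = 0; j < 8; j++, entry++)
			pri[entry] = (words[i] >> (j * 4)) & 0xf;
}

int main(void)
{
	uint8_t in[DSCP_SIZE], out[DSCP_SIZE];
	uint32_t words[DSCP_SIZE / 8];
	int i;

	for (i = 0; i < DSCP_SIZE; i++)
		in[i] = i & 0x7;	/* priorities 0..7 */

	dscp_pack(in, words);
	dscp_unpack(words, out);

	for (i = 0; i < DSCP_SIZE; i++)
		assert(in[i] == out[i]);

	printf("round-trip ok\n");
	return 0;
}
```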
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index d577f4e1..15186246 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -25,6 +25,8 @@ struct ecore_dcbx_info {
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
struct dcbx_local_params local_admin;
struct ecore_dcbx_results results;
+ struct dcb_dscp_map dscp_map;
+ bool dscp_nig_update;
struct dcbx_mib operational;
struct dcbx_mib remote;
struct ecore_dcbx_set set;
@@ -32,10 +34,15 @@ struct ecore_dcbx_info {
u8 dcbx_cap;
};
-/* Upper layer driver interface routines */
-enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
- struct ecore_ptt *,
- struct ecore_dcbx_set *);
+struct ecore_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcb_dscp_map *dscp_map;
+ struct dcbx_mib *mib;
+ osal_size_t size;
+ u32 addr;
+};
/* ECORE local interface routines */
enum _ecore_status_t
@@ -48,8 +55,11 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+
+#ifndef REAL_ASIC_ONLY
/* @@@TBD eagle phy workaround */
void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
bool set_to_pfc);
+#endif
#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 7767d48e..82416e7f 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -9,7 +9,7 @@
#ifndef __ECORE_DCBX_API_H__
#define __ECORE_DCBX_API_H__
-#include "ecore.h"
+#include "ecore_status.h"
#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
@@ -23,67 +23,89 @@ enum ecore_mib_read_type {
struct ecore_dcbx_app_data {
bool enable; /* DCB enabled */
- bool update; /* Update indication */
+ u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
+ bool dscp_enable; /* DSCP enabled */
+ u8 dscp_val; /* DSCP value */
};
#ifndef __EXTRACT__LINUX__
enum dcbx_protocol_type {
+ DCBX_PROTOCOL_ISCSI,
+ DCBX_PROTOCOL_FCOE,
+ DCBX_PROTOCOL_ROCE,
+ DCBX_PROTOCOL_ROCE_V2,
DCBX_PROTOCOL_ETH,
DCBX_MAX_PROTOCOL_TYPE
};
-#ifdef LINUX_REMOVE
-/* We can't assume THE HSI values are available to clients, so we need
- * to redefine those here.
- */
-#ifndef LLDP_CHASSIS_ID_STAT_LEN
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#endif
-#ifndef LLDP_PORT_ID_STAT_LEN
-#define LLDP_PORT_ID_STAT_LEN 4
-#endif
-#ifndef DCBX_MAX_APP_PROTOCOL
-#define DCBX_MAX_APP_PROTOCOL 32
-#endif
-
-#endif
+#define ECORE_LLDP_CHASSIS_ID_STAT_LEN 4
+#define ECORE_LLDP_PORT_ID_STAT_LEN 4
+#define ECORE_DCBX_MAX_APP_PROTOCOL 32
+#define ECORE_MAX_PFC_PRIORITIES 8
+#define ECORE_DCBX_DSCP_SIZE 64
struct ecore_dcbx_lldp_remote {
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- bool enable_rx;
- bool enable_tx;
- u32 tx_interval;
- u32 max_credit;
+ u32 peer_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
+ bool enable_rx;
+ bool enable_tx;
+ u32 tx_interval;
+ u32 max_credit;
};
struct ecore_dcbx_lldp_local {
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 local_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
};
struct ecore_dcbx_app_prio {
- u8 eth;
+ u8 roce;
+ u8 roce_v2;
+ u8 fcoe;
+ u8 iscsi;
+ u8 eth;
+};
+
+struct ecore_dbcx_pfc_params {
+ bool willing;
+ bool enabled;
+ u8 prio[ECORE_MAX_PFC_PRIORITIES];
+ u8 max_tc;
+};
+
+enum ecore_dcbx_sf_ieee_type {
+ ECORE_DCBX_SF_IEEE_ETHTYPE,
+ ECORE_DCBX_SF_IEEE_TCP_PORT,
+ ECORE_DCBX_SF_IEEE_UDP_PORT,
+ ECORE_DCBX_SF_IEEE_TCP_UDP_PORT
+};
+
+struct ecore_app_entry {
+ bool ethtype;
+ enum ecore_dcbx_sf_ieee_type sf_ieee;
+ bool enabled;
+ u8 prio;
+ u16 proto_id;
+ enum dcbx_protocol_type proto_type;
};
struct ecore_dcbx_params {
- u32 app_bitmap[DCBX_MAX_APP_PROTOCOL];
- u16 num_app_entries;
- bool app_willing;
- bool app_valid;
- bool ets_willing;
- bool ets_enabled;
- bool valid; /* Indicate validity of params */
- u32 ets_pri_tc_tbl[1];
- u32 ets_tc_bw_tbl[2];
- u32 ets_tc_tsa_tbl[2];
- bool pfc_willing;
- bool pfc_enabled;
- u32 pfc_bitmap;
- u8 max_pfc_tc;
- u8 max_ets_tc;
+ struct ecore_app_entry app_entry[ECORE_DCBX_MAX_APP_PROTOCOL];
+ u16 num_app_entries;
+ bool app_willing;
+ bool app_valid;
+ bool app_error;
+ bool ets_willing;
+ bool ets_enabled;
+ bool ets_cbs;
+ bool valid; /* Indicate validity of params */
+ u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
+ u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
+ u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
+ struct ecore_dbcx_pfc_params pfc;
+ u8 max_ets_tc;
};
struct ecore_dcbx_admin_params {
@@ -103,22 +125,40 @@ struct ecore_dcbx_operational_params {
bool enabled;
bool ieee;
bool cee;
+ bool local;
u32 err;
};
+struct ecore_dcbx_dscp_params {
+ bool enabled;
+ u8 dscp_pri_map[ECORE_DCBX_DSCP_SIZE];
+};
+
struct ecore_dcbx_get {
struct ecore_dcbx_operational_params operational;
struct ecore_dcbx_lldp_remote lldp_remote;
struct ecore_dcbx_lldp_local lldp_local;
struct ecore_dcbx_remote_params remote;
struct ecore_dcbx_admin_params local;
+ struct ecore_dcbx_dscp_params dscp;
};
#endif
+#define ECORE_DCBX_VERSION_DISABLED 0
+#define ECORE_DCBX_VERSION_IEEE 1
+#define ECORE_DCBX_VERSION_CEE 2
+
struct ecore_dcbx_set {
- struct ecore_dcbx_admin_params config;
+#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
+#define ECORE_DCBX_OVERRIDE_PFC_CFG (1 << 1)
+#define ECORE_DCBX_OVERRIDE_ETS_CFG (1 << 2)
+#define ECORE_DCBX_OVERRIDE_APP_CFG (1 << 3)
+#define ECORE_DCBX_OVERRIDE_DSCP_CFG (1 << 4)
+ u32 override_flags;
bool enabled;
+ struct ecore_dcbx_admin_params config;
u32 ver_num;
+ struct ecore_dcbx_dscp_params dscp;
};
struct ecore_dcbx_results {
@@ -129,31 +169,27 @@ struct ecore_dcbx_results {
struct ecore_dcbx_app_metadata {
enum dcbx_protocol_type id;
- const char *name; /* @DPDK */
+ const char *name; /* @DPDK */
enum ecore_pci_personality personality;
};
-struct ecore_dcbx_mib_meta_data {
- struct lldp_config_params_s *lldp_local;
- struct lldp_status_params_s *lldp_remote;
- struct dcbx_local_params *local_admin;
- struct dcbx_mib *mib;
- osal_size_t size;
- u32 addr;
-};
-
-void
-ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
- struct ecore_hw_info *p_info,
- bool enable, bool update, u8 prio, u8 tc,
- enum dcbx_protocol_type type,
- enum ecore_pci_personality personality);
-
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
struct ecore_dcbx_get *,
enum ecore_mib_read_type);
+enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
+ struct ecore_dcbx_set *);
+
+enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
+ struct ecore_ptt *,
+ struct ecore_dcbx_set *,
+ bool);
+
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
+ {DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
+ {DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
{DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}
};
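/* Editorial sketch, not part of the patch: how a caller could use the new
 * override flags together with the accessors declared above
 * (ecore_dcbx_get_config_params / ecore_dcbx_config_params) to update only
 * one portion of the DCBX configuration. The internal layout of 'config' is
 * not shown in this hunk, so the commented field access is illustrative only.
 */
static enum _ecore_status_t example_dcbx_override_pfc(struct ecore_hwfn *p_hwfn,
						      struct ecore_ptt *p_ptt)
{
	struct ecore_dcbx_set dcbx_set;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&dcbx_set, sizeof(dcbx_set));

	/* Start from the currently cached admin configuration */
	rc = ecore_dcbx_get_config_params(p_hwfn, &dcbx_set);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Ask that only the PFC portion of this request be applied */
	dcbx_set.override_flags = ECORE_DCBX_OVERRIDE_PFC_CFG;
	/* ... adjust the PFC fields inside dcbx_set.config here ... */

	/* 'true' requests a hardware commit of the new configuration */
	return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, true);
}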
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 0a689694..6060f9ee 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -29,11 +29,20 @@
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
-#include "ecore_attn_values.h"
#include "ecore_dcbx.h"
+/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
+ * registers involved are not split and thus configuration is a race where
+ * some of the PFs configuration might be lost.
+ * Eventually, this needs to move into an MFW-covered HW-lock arbitration
+ * mechanism, as this doesn't cover some cases [e.g., PDA, or scenarios where
+ * there's more than a single compiled ecore component in the system].
+ */
+static osal_spinlock_t qm_lock;
+static bool qm_lock_init;
+
/* Configurable */
-#define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required
+#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
* load the driver. The number was
* arbitrarily set.
*/
@@ -50,7 +59,17 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ u32 val;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
+ * read from actual register, but we're currently not using
+ * it for actual doorbelling.
+ */
+ return 1 << 17;
+ }
+
+ val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
/* The above registers were updated in the past only in CMT mode. Since
* they were found to be useful MFW started updating them from 8.7.7.0.
@@ -59,15 +78,17 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
if (!val) {
if (p_hwfn->p_dev->num_hwfns > 1) {
DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR"
- " size of 256kB for GRC and 512kB for DB\n");
+ "BAR size not configured. Assuming BAR size");
+ DP_NOTICE(p_hwfn, false,
+ "of 256kB for GRC and 512kB for DB\n");
return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size");
+ DP_NOTICE(p_hwfn, false,
+ "of 512kB for GRC and 512kB for DB\n");
+ return 512 * 1024;
}
-
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR"
- " size of 512kB for GRC and 512kB for DB\n");
- return 512 * 1024;
}
return 1 << (val + 15);
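/* Editorial note (not part of the patch): the register encodes the BAR size
 * as a power of two with a +15 bias, so val = 3 gives 1 << 18 = 256kB and
 * val = 4 gives 1 << 19 = 512kB, matching the fall-back sizes used above
 * when the register has not been configured by the MFW.
 */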
@@ -156,6 +177,9 @@ void ecore_resc_free(struct ecore_dev *p_dev)
ecore_eq_free(p_hwfn, p_hwfn->p_eq);
ecore_consq_free(p_hwfn, p_hwfn->p_consq);
ecore_int_free(p_hwfn);
+#ifdef CONFIG_ECORE_LL2
+ ecore_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
+#endif
ecore_iov_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
@@ -166,16 +190,28 @@ void ecore_resc_free(struct ecore_dev *p_dev)
static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
bool b_sleepable)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports;
+ u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue;
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
- u16 num_pqs, multi_cos_tcs = 1;
-#ifdef CONFIG_ECORE_SRIOV
- u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
-#else
+ bool init_rdma_offload_pq = false;
+ bool init_pure_ack_pq = false;
+ bool init_ooo_pq = false;
+ u16 num_pqs, protocol_pqs;
+ u16 num_pf_rls = 0;
u16 num_vfs = 0;
-#endif
+ u32 pf_rl;
+ u8 pf_wfq;
+
+ /* @TMP - saving the existing min/max bw config before resetting the
+ * qm_info to restore them.
+ */
+ pf_rl = qm_info->pf_rl;
+ pf_wfq = qm_info->pf_wfq;
+#ifdef CONFIG_ECORE_SRIOV
+ if (p_hwfn->p_dev->p_iov_info)
+ num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+#endif
OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
#ifndef ASIC_ONLY
@@ -187,20 +223,74 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
}
#endif
- num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
+ /* Ethernet PFs require a PQ per TC. Even if only a subset of the TCs is
+ * active, we want physical queues allocated for all of them, since we
+ * don't have a good recycle flow. Non-ethernet PFs require only a
+ * single physical queue.
+ */
+ if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
+ p_hwfn->hw_info.personality == ECORE_PCI_ETH)
+ protocol_pqs = p_hwfn->hw_info.num_hw_tc;
+ else
+ protocol_pqs = 1;
+
+ num_pqs = protocol_pqs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
+ if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+ num_pqs++; /* for RoCE queue */
+ init_rdma_offload_pq = true;
+ if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) {
+ /* Due to FW assumption that rl==vport, we limit the
+ * number of rate limiters by the minimum between its
+ * allocated number and the allocated number of vports.
+ * Another limitation is the number of supported qps
+ * with rate limiters in FW.
+ */
+ num_pf_rls =
+ (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+ RESC_NUM(p_hwfn, ECORE_VPORT));
+
+ /* we subtract num_vfs because each one requires a rate
+ * limiter, and one default rate limiter.
+ */
+ if (num_pf_rls < num_vfs + 1) {
+ DP_ERR(p_hwfn, "No RL for DCQCN");
+ DP_ERR(p_hwfn, "[num_pf_rls %d num_vfs %d]\n",
+ num_pf_rls, num_vfs);
+ return ECORE_INVAL;
+ }
+ num_pf_rls -= num_vfs + 1;
+ }
+
+ num_pqs += num_pf_rls;
+ qm_info->num_pf_rls = (u8)num_pf_rls;
+ }
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_IWARP) {
+ num_pqs += 3; /* for iwarp queue / pure-ack / ooo */
+ init_rdma_offload_pq = true;
+ init_pure_ack_pq = true;
+ init_ooo_pq = true;
+ }
+
+ if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
+ num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
+ init_pure_ack_pq = true;
+ init_ooo_pq = true;
+ }
+
/* Sanity checking that setup requires legal number of resources */
if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x when"
- " only %04x are available\n",
+ "Need too many Physical queues - 0x%04x avail %04x",
num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
return ECORE_INVAL;
}
/* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
- * then special queues, then per-VF PQ.
+ * then special queues (iSCSI pure-ACK / RoCE), then per-VF PQ.
*/
qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
b_sleepable ? GFP_KERNEL :
@@ -238,13 +328,40 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
- /* First init per-TC PQs */
- for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
+ /* First init rate-limited queues (due to the RoCE assumption that
+ * qpid == rlid).
+ */
+ for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+ };
+
+ /* Protocol PQs */
+ for (i = 0; i < protocol_pqs; i++) {
+ struct init_qm_pq_params *params =
+ &qm_info->qm_pq_params[curr_queue++];
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
+ if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
+ p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->tc_id = i;
+ /* Note: this assumes that if we had a configuration
+ * with N TCs and subsequently another configuration
+ * with fewer TCs, the in-flight traffic (in QM queues,
+ * in FW, from driver to FW) will still trickle out and
+ * not get "stuck" in the QM. This is determined by
+ * NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ. Unused TCs are
+ * supposed to be cleared in this map, allowing traffic
+ * to flush out. If this is not the case, we would need
+ * to set the TC of unused queues to 0, and reconfigure
+ * the QM every time the number of TCs changes. Unused
+ * queues in this context means those intended for TCs
+ * where tc_id > hw_info.num_active_tcs.
+ */
params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
} else {
params->vport_id = vport_id;
@@ -254,22 +371,50 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
}
/* Then init pure-LB PQ */
- qm_info->pure_lb_pq = i;
- qm_info->qm_pq_params[i].vport_id =
+ qm_info->pure_lb_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id =
(u8)RESC_START(p_hwfn, ECORE_VPORT);
- qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[i].wrr_group = 1;
- i++;
+ qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+
+ qm_info->offload_pq = 0; /* Already initialized for iSCSI/FCoE */
+ if (init_rdma_offload_pq) {
+ qm_info->offload_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_pure_ack_pq) {
+ qm_info->pure_ack_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_ooo_pq) {
+ qm_info->ooo_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
/* Then init per-VF PQs */
- vf_offset = i;
+ vf_offset = curr_queue;
for (i = 0; i < num_vfs; i++) {
/* First vport is used by the PF */
- qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id +
- i + 1;
- qm_info->qm_pq_params[vf_offset + i].tc_id =
- p_hwfn->hw_info.non_offload_tc;
- qm_info->qm_pq_params[vf_offset + i].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+ /* @@@TBD VF Multi-cos */
+ qm_info->qm_pq_params[curr_queue].tc_id = 0;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+ curr_queue++;
};
qm_info->vf_queues_offset = vf_offset;
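/* Editorial sketch (not part of the patch) of the PQ layout produced by the
 * loops above, with illustrative index ranges:
 *
 *   [0 .. num_pf_rls)                  rate-limited PQs (RoCE DCQCN)
 *   [num_pf_rls .. +protocol_pqs)      per-TC / protocol PQs
 *   [pure_lb_pq]                       pure loopback PQ
 *   offload_pq / pure_ack_pq / ooo_pq  optional, depending on personality
 *   [vf_queues_offset .. +num_vfs)     one PQ per VF
 */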
@@ -285,9 +430,9 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
* be in place
*/
if (num_ports == 4)
- p_qm_port->num_active_phys_tcs = 2;
+ p_qm_port->active_phys_tcs = 0xf;
else
- p_qm_port->num_active_phys_tcs = 5;
+ p_qm_port->active_phys_tcs = 0x9f;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
@@ -305,14 +450,14 @@ static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return ECORE_SUCCESS;
-alloc_err:
+ alloc_err:
DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
ecore_qm_info_free(p_hwfn);
return ECORE_NOMEM;
@@ -330,8 +475,8 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- enum _ecore_status_t rc;
bool b_rc;
+ enum _ecore_status_t rc;
/* qm_info is allocated in ecore_init_qm_info() which is already called
* from ecore_resc_alloc() or previous call of ecore_qm_reconf().
@@ -346,8 +491,10 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
return rc;
/* stop PF's qm queues */
+ OSAL_SPIN_LOCK(&qm_lock);
b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
qm_info->start_pq, qm_info->num_pqs);
+ OSAL_SPIN_UNLOCK(&qm_lock);
if (!b_rc)
return ECORE_INVAL;
@@ -364,9 +511,11 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
return rc;
/* start PF's qm queues */
+ OSAL_SPIN_LOCK(&qm_lock);
b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
qm_info->start_pq, qm_info->num_pqs);
- if (!rc)
+ OSAL_SPIN_UNLOCK(&qm_lock);
+ if (!b_rc)
return ECORE_INVAL;
return ECORE_SUCCESS;
@@ -374,16 +523,19 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_consq *p_consq;
struct ecore_eq *p_eq;
+#ifdef CONFIG_ECORE_LL2
+ struct ecore_ll2_info *p_ll2_info;
+#endif
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
if (IS_VF(p_dev))
return rc;
p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
- sizeof(struct ecore_fw_data));
+ sizeof(*p_dev->fw_data));
if (!p_dev->fw_data)
return ECORE_NOMEM;
@@ -416,6 +568,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u32 n_eqes, num_cons;
/* First allocate the context manager structure */
rc = ecore_cxt_mngr_alloc(p_hwfn);
@@ -464,7 +617,60 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
goto alloc_err;
/* EQ */
- p_eq = ecore_eq_alloc(p_hwfn, 256);
+ n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if ((p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) ||
+ (p_hwfn->hw_info.personality == ECORE_PCI_IWARP)) {
+ /* Calculate the EQ size
+ * ---------------------
+ * Each ICID may generate up to one event at a time i.e.
+ * the event must be handled/cleared before a new one
+ * can be generated. We calculate the sum of events per
+ * protocol and create an EQ deep enough to handle the
+ * worst case:
+ * - Core - according to SPQ.
+ * - RoCE - per QP there are a couple of ICIDs, one
+ * responder and one requester, each can
+ * generate an EQE => n_eqes_qp = 2 * n_qp.
+ * Each CQ can generate an EQE. There are 2 CQs
+ * per QP => n_eqes_cq = 2 * n_qp.
+ * Hence the RoCE total is 4 * n_qp or
+ * 2 * num_cons.
+ * - ENet - There can be up to two events per VF. One
+ * for VF-PF channel and another for VF FLR
+ * initial cleanup. The number of VFs is
+ * bounded by MAX_NUM_VFS_BB, and is much
+ * smaller than RoCE's so we avoid exact
+ * calculation.
+ */
+ if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+ num_cons =
+ ecore_cxt_get_proto_cid_count(
+ p_hwfn,
+ PROTOCOLID_ROCE,
+ 0);
+ num_cons *= 2;
+ } else {
+ num_cons = ecore_cxt_get_proto_cid_count(
+ p_hwfn,
+ PROTOCOLID_IWARP,
+ 0);
+ }
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ } else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
+ num_cons =
+ ecore_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI, 0);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+ DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements. "
+ "The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
+ goto alloc_err;
+ }
+
+ p_eq = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
if (!p_eq)
goto alloc_no_mem;
p_hwfn->p_eq = p_eq;
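/* Editorial worked example (not part of the patch), assuming a RoCE PF whose
 * SPQ chain has a capacity of 128 and which was allocated 1024 RoCE CIDs:
 *   n_eqes = 128 + (2 * 1024) + (2 * MAX_NUM_VFS_BB)
 * which stays well below the 0xFFFF limit checked above before the EQ chain
 * is allocated with a u16 element count.
 */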
@@ -474,12 +680,20 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
+#ifdef CONFIG_ECORE_LL2
+ if (p_hwfn->using_ll2) {
+ p_ll2_info = ecore_ll2_alloc(p_hwfn);
+ if (!p_ll2_info)
+ goto alloc_no_mem;
+ p_hwfn->p_ll2_info = p_ll2_info;
+ }
+#endif
+
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for"
- " dmae_info structure\n");
+ "Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
@@ -487,7 +701,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
rc = ecore_dcbx_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dcbxstruct\n");
+ "Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
}
@@ -501,9 +715,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
-alloc_no_mem:
+ alloc_no_mem:
rc = ECORE_NOMEM;
-alloc_err:
+ alloc_err:
ecore_resc_free(p_dev);
return rc;
}
@@ -532,6 +746,10 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+#ifdef CONFIG_ECORE_LL2
+ if (p_hwfn->using_ll2)
+ ecore_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
+#endif
}
}
@@ -564,11 +782,13 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
- /* Make sure notification is not set before initiating final cleanup */
+/* Make sure notification is not set before initiating final cleanup */
+
if (REG_RD(p_hwfn, addr)) {
DP_NOTICE(p_hwfn, false,
- "Unexpected; Found final cleanup notification "
- "before initiating final cleanup\n");
+ "Unexpected; Found final cleanup notification");
+ DP_NOTICE(p_hwfn, false,
+ " before initiating final cleanup\n");
REG_WR(p_hwfn, addr, 0);
}
@@ -595,24 +815,20 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
-static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
{
int hw_mode = 0;
- switch (ECORE_GET_TYPE(p_hwfn->p_dev)) {
- case CHIP_BB_A0:
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_BB_A0;
- break;
- case CHIP_BB_B0:
+ } else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_BB_B0;
- break;
- case CHIP_K2:
+ } else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
- break;
- default:
- DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n",
- ECORE_GET_TYPE(p_hwfn->p_dev));
- return;
+ } else {
+ DP_NOTICE(p_hwfn, true, "Unknown chip type %#x\n",
+ p_hwfn->p_dev->type);
+ return ECORE_INVAL;
}
/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
@@ -630,7 +846,7 @@ static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
DP_NOTICE(p_hwfn, true,
"num_ports_in_engine = %d not supported\n",
p_hwfn->p_dev->num_ports_in_engines);
- return;
+ return ECORE_INVAL;
}
switch (p_hwfn->p_dev->mf_mode) {
@@ -661,8 +877,10 @@ static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
#endif
hw_mode |= 1 << MODE_ASIC;
+#ifndef REAL_ASIC_ONLY
if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
+#endif
if (p_hwfn->p_dev->num_hwfns > 1)
hw_mode |= 1 << MODE_100G;
@@ -672,12 +890,14 @@ static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
"Configuring function for hw_mode: 0x%08x\n",
p_hwfn->hw_info.hw_mode);
+
+ return ECORE_SUCCESS;
}
#ifndef ASIC_ONLY
/* MFW-replacement initializations for non-ASIC */
-static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 pl_hv = 1;
int i;
@@ -690,37 +910,6 @@ static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
- /* initialize interrupt masks */
- for (i = 0;
- i <
- attn_blocks[BLOCK_MISCS].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
- num_of_int_regs; i++)
- ecore_wr(p_hwfn, p_ptt,
- attn_blocks[BLOCK_MISCS].
- chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[i]->
- mask_addr, 0);
-
- if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt,
- attn_blocks[BLOCK_CNIG].
- chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
- mask_addr, 0);
- ecore_wr(p_hwfn, p_ptt,
- attn_blocks[BLOCK_PGLCS].
- chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
- mask_addr, 0);
- ecore_wr(p_hwfn, p_ptt,
- attn_blocks[BLOCK_CPMU].
- chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
- mask_addr, 0);
- /* Currently A0 and B0 interrupt bits are the same in pglue_b;
- * If this changes, need to set this according to chip type. <14/09/23>
- */
- ecore_wr(p_hwfn, p_ptt,
- attn_blocks[BLOCK_PGLUE_B].
- chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
- mask_addr, 0x80000);
-
/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
@@ -745,6 +934,8 @@ static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
if (i == 100)
DP_NOTICE(p_hwfn, true,
"RBC done failed to complete in PSWRQ2\n");
+
+ return ECORE_SUCCESS;
}
#endif
@@ -784,11 +975,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
int hw_mode)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dev *p_dev = p_hwfn->p_dev;
u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
ecore_init_cau_rt_data(p_dev);
@@ -796,8 +987,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_gtt_init(p_hwfn);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
#endif
if (p_hwfn->mcp_info) {
@@ -839,9 +1033,15 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ /* Workaround: clear ROCE search for all functions to prevent
+ * involving a non-initialized function in processing ROCE packets.
+ */
num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
- if (num_pfs == 1)
- return rc;
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
/* pretend to original PF */
ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
@@ -858,6 +1058,9 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ ecore_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ ecore_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ ecore_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
}
/* pretend to original PF */
ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
@@ -948,11 +1151,15 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
return;
}
+ /* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
- port);
+ port);
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
+ /* XLMAC: SOFT RESET */
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
+ /* XLMAC: Port Speed >= 10Gbps */
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
+ /* XLMAC: Max Size */
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
@@ -961,10 +1168,14 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
0x30ffffc000ULL, 0, port);
ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
- port);
- ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
- 0, port);
+ port); /* XLMAC: TX_EN, RX_EN */
+ /* XLMAC: TX_EN, RX_EN, SW_LINK_STATUS */
+ ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL,
+ 0x1003 | (loopback << 2), 0, port);
+ /* Enabled Parallel PFC interface */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
+
+ /* XLPORT port enable */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}
@@ -977,10 +1188,10 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
/* Reset of XMAC */
/* FIXME: move to common start */
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
- MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
OSAL_MSLEEP(1);
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
- MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
+ MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
@@ -1019,12 +1230,10 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- /* Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
-
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
return ECORE_SUCCESS;
@@ -1063,14 +1272,75 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
+ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+ u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
+ u32 dpi_bit_shift, dpi_count;
+ u32 min_dpis;
+
+ /* Calculate DPI size
+ * ------------------
+ * The PWM region contains Doorbell Pages. The first is reserved for
+ * the kernel for, e.g., L2. The others are free to be used by non-
+ * trusted applications, typically from user space. Each page, called a
+ * doorbell page is sectioned into windows that allow doorbells to be
+ * issued in parallel by the kernel/application. The size of such a
+ * window (a.k.a. WID) is 1kB.
+ * Summary:
+ * 1kB WID x N WIDS = DPI page size
+ * DPI page size x N DPIs = PWM region size
+ * Notes:
+ * The DPI page size must be in multiples of OSAL_PAGE_SIZE
+ * in order to ensure that two applications won't share the same page.
+ * It also must contain at least one WID per CPU to allow parallelism.
+ * It also must be a power of 2, since it is stored as a bit shift.
+ *
+ * The DPI page size is stored in a register as 'dpi_bit_shift' so that
+ * 0 is 4kB, 1 is 8kB, etc. Hence the minimum size is 4,096
+ * containing 4 WIDs.
+ */
+ dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
+ dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
+ dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
+ dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+ dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
+
+ dpi_count = pwm_region_size / dpi_page_size;
+
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = OSAL_MAX_T(u32, ECORE_MIN_DPIS, min_dpis);
+
+ /* Update hwfn */
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ /* Update registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return ECORE_NORESOURCES;
+
+ return ECORE_SUCCESS;
+}
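/* Editorial worked example (not part of the patch), assuming the 1kB WID size
 * described above, an OSAL_PAGE_SIZE of 4kB and 8 active CPUs:
 *   dpi_page_size_1 = 1kB * 8 = 8kB
 *   dpi_page_size_2 = max(1kB, 4kB) = 4kB
 *   dpi_page_size   = roundup_pow2(max(8kB, 4kB)) = 8kB -> dpi_bit_shift = 1
 * and a 512kB PWM region would then yield dpi_count = 512kB / 8kB = 64 DPIs.
 */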
+
+enum ECORE_ROCE_EDPM_MODE {
+ ECORE_ROCE_EDPM_MODE_ENABLE = 0,
+ ECORE_ROCE_EDPM_MODE_FORCE_ON = 1,
+ ECORE_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
+static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 pwm_regsize, norm_regsize;
u32 non_pwm_conn, min_addr_reg1;
u32 db_bar_size, n_cpus;
+ u32 roce_edpm_mode;
u32 pf_dems_shift;
int rc = ECORE_SUCCESS;
+ u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
if (p_hwfn->p_dev->num_hwfns > 1)
@@ -1101,26 +1371,62 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
/* Check that the normal and PWM sizes are valid */
if (db_bar_size < norm_regsize) {
DP_ERR(p_hwfn->p_dev,
- "Doorbell BAR size 0x%x is too"
- " small (normal region is 0x%0x )\n",
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
db_bar_size, norm_regsize);
return ECORE_NORESOURCES;
}
if (pwm_regsize < ECORE_MIN_PWM_REGION) {
DP_ERR(p_hwfn->p_dev,
- "PWM region size 0x%0x is too small."
- " Should be at least 0x%0x (Doorbell BAR size"
- " is 0x%x and normal region size is 0x%0x)\n",
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
norm_regsize);
return ECORE_NORESOURCES;
}
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE) ||
+ ((roce_edpm_mode == ECORE_ROCE_EDPM_MODE_FORCE_ON))) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
+ */
+ n_cpus = OSAL_NUM_ACTIVE_CPU();
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ }
+
+ cond = ((rc) && (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled from user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ /* If we entered this flow due to DCBX then the DPM register is
+ * already configured.
+ */
+ }
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d",
+ norm_regsize, pwm_regsize);
+ DP_INFO(p_hwfn,
+ " dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
+ p_hwfn->dpi_size, p_hwfn->dpi_count,
+ ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ "disabled" : "enabled");
+
+ /* Check return codes from above calls */
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs\n");
+ return ECORE_NORESOURCES;
+ }
+
/* Update hwfn */
- p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
- * calculate the doorbell
- * address
- */
+ p_hwfn->dpi_start_offset = norm_regsize;
/* Update registers */
/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
@@ -1128,12 +1434,6 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
- DP_INFO(p_hwfn,
- "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n",
- db_bar_size, norm_regsize, pwm_regsize);
- DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size,
- p_hwfn->dpi_count);
-
return ECORE_SUCCESS;
}
@@ -1145,13 +1445,12 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
bool b_hw_start,
enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
u8 rel_pf_id = p_hwfn->rel_pf_id;
u32 prs_reg;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u16 ctrl;
int pos;
- /* ILT/DQ/CM/QM */
if (p_hwfn->mcp_info) {
struct ecore_mcp_function_info *p_info;
@@ -1160,11 +1459,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
/* Update rate limit once we'll actually have a link */
- p_hwfn->qm_info.pf_rl = 100;
+ p_hwfn->qm_info.pf_rl = 100000;
}
ecore_cxt_hw_init_pf(p_hwfn);
- ecore_int_igu_init_rt(p_hwfn); /* @@@TBD TODO MichalS multi hwfn ?? */
+ ecore_int_igu_init_rt(p_hwfn);
/* Set VLAN in NIG if needed */
if (hw_mode & (1 << MODE_MF_SD)) {
@@ -1183,7 +1482,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
}
/* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
- STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
+ (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
@@ -1213,43 +1516,20 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
* PCI config space.
*/
/* Not in use @DPDK
- * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
- * if (!pos) {
- * DP_NOTICE(p_hwfn, true,
- * "Failed to find the PCI Express"
- * " Capability structure in the PCI config space\n");
- * return ECORE_IO;
- * }
- * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
- * &ctrl);
- * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
- * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
- * &ctrl);
- */
-
-#ifndef ASIC_ONLY
- /*@@TMP - On B0 build 1, need to mask the datapath_registers parity */
- if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev) &&
- (p_hwfn->p_dev->chip_metal == 1)) {
- u32 reg_addr, tmp;
-
- reg_addr =
- attn_blocks[BLOCK_PGLUE_B].
- chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].prty_regs[0]->
- mask_addr;
- DP_NOTICE(p_hwfn, false,
- "Masking datapath registers parity on"
- " B0 emulation [build 1]\n");
- tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
- tmp |= (1 << 0); /* Was PRTY_MASK_DATAPATH_REGISTERS */
- ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
- }
-#endif
+ * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
+ * if (!pos) {
+ * DP_NOTICE(p_hwfn, true,
+ * "Failed to find the PCIe Cap\n");
+ * return ECORE_IO;
+ * }
+ * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, &ctrl);
+ * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
+ * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, ctrl);
+ */
rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
if (rc)
return rc;
-
if (b_hw_start) {
/* enable interrupts */
ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
@@ -1265,14 +1545,27 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
+ if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) {
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1,
+ (1 << 2));
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
- "PRS_REG_SEARCH register after start PFn\n");
+ "PRS_REG_SEARCH registers after start PFn\n");
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_TCP: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
"PRS_REG_SEARCH_UDP: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_FCOE: %x\n", prs_reg);
+ prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
+ "PRS_REG_SEARCH_ROCE: %x\n", prs_reg);
prs_reg = ecore_rd(p_hwfn, p_ptt,
PRS_REG_SEARCH_TCP_FIRST_FRAG);
DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
@@ -1326,18 +1619,20 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
}
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
- struct ecore_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum ecore_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data)
+ struct ecore_hw_init_params *p_params)
{
enum _ecore_status_t rc, mfw_rc;
u32 load_code, param;
int i, j;
+ if (p_params->int_mode == ECORE_INT_MODE_MSI && p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_dev, false,
+ "MSI mode is not supported for CMT devices\n");
+ return ECORE_INVAL;
+ }
+
if (IS_PF(p_dev)) {
- rc = ecore_init_fw_data(p_dev, bin_fw_data);
+ rc = ecore_init_fw_data(p_dev, p_params->bin_fw_data);
if (rc != ECORE_SUCCESS)
return rc;
}
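/* Editorial sketch (not part of the patch): the old argument list of
 * ecore_hw_init() is now carried in an ecore_hw_init_params aggregate. The
 * field names below are taken from the p_params-> accesses in this function;
 * ECORE_INT_MODE_MSIX is an assumed enum value (only ECORE_INT_MODE_MSI is
 * referenced in the check above), and 'epoch' is a caller-supplied value.
 */
static enum _ecore_status_t example_hw_init(struct ecore_dev *p_dev,
					    struct ecore_tunn_start_params *p_tunn,
					    const u8 *fw_data, u32 epoch)
{
	struct ecore_hw_init_params hw_init_params;

	OSAL_MEM_ZERO(&hw_init_params, sizeof(hw_init_params));
	hw_init_params.p_tunn = p_tunn;			/* tunnelling config, may be NULL */
	hw_init_params.b_hw_start = true;		/* start HW after init */
	hw_init_params.int_mode = ECORE_INT_MODE_MSIX;	/* assumed interrupt mode */
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = fw_data;		/* firmware binary blob */
	hw_init_params.epoch = epoch;			/* forwarded to the mdump facility */

	return ecore_hw_init(p_dev, &hw_init_params);
}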
@@ -1346,16 +1641,19 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (IS_VF(p_dev)) {
- rc = ecore_vf_pf_init(p_hwfn);
- if (rc)
- return rc;
+ p_hwfn->b_int_enabled = 1;
continue;
}
/* Enable DMAE in PXP */
rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_calc_hw_mode(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- ecore_calc_hw_mode(p_hwfn);
/* @@@TBD need to add here:
* Check for fan failure
* Prev_unload
@@ -1380,7 +1678,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "Load request was sent.Resp:0x%x, Load code: 0x%x\n",
+ "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
rc, load_code);
/* Only relevant for recovery:
@@ -1392,6 +1690,11 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
+ if (!qm_lock_init) {
+ OSAL_SPIN_LOCK_INIT(&qm_lock);
+ qm_lock_init = true;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -1405,6 +1708,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc)
break;
+#ifndef REAL_ASIC_ONLY
if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
struct init_nig_pri_tc_map_req tc_map;
@@ -1421,12 +1725,15 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->p_main_ptt,
&tc_map);
}
- /* fallthrough */
+#endif
+ /* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn, p_hwfn->hw_info.hw_mode,
- b_hw_start, int_mode,
- allow_npar_tx_switch);
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
break;
default:
rc = ECORE_NOTIMPL;
@@ -1435,7 +1742,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, true,
- "init phase failed loadcode 0x%x (rc %d)\n",
+ "init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
/* ACK mfw regardless of success or failure of initialization */
@@ -1450,10 +1757,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
return mfw_rc;
}
+ ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->epoch);
+
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "sending phony dcbx set command to trigger DCBx"
- " attention handling\n");
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_SET_DCBX,
1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
@@ -1471,17 +1781,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
-static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
- struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
int i;
/* close timers */
ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
- for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
- !p_dev->recov_in_prog; i++) {
+ for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && !p_dev->recov_in_prog;
+ i++) {
if ((!ecore_rd(p_hwfn, p_ptt,
TM_REG_PF_SCAN_ACTIVE_CONN)) &&
(!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
@@ -1494,12 +1804,11 @@ static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
}
if (i == ECORE_HW_STOP_RETRY_LIMIT)
DP_NOTICE(p_hwfn, true,
- "Timers linear scans are not over"
- " [Connection %02x Tasks %02x]\n",
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
(u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
+ TM_REG_PF_SCAN_ACTIVE_CONN),
(u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+ TM_REG_PF_SCAN_ACTIVE_TASK));
}
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
@@ -1536,9 +1845,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
rc = ecore_sp_pf_stop(p_hwfn);
if (rc)
DP_NOTICE(p_hwfn, true,
- "Failed to close PF against FW. Continue to"
- " stop HW to prevent illegal host access"
- " by the device\n");
+ "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
/* perform debug action after PF stop was sent */
OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
@@ -1550,6 +1857,8 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
/* close parser */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
/* @@@TBD - clean transmission queues (5.b) */
@@ -1603,6 +1912,8 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
/* @@@TBD - clean transmission queues (5.b) */
@@ -1623,6 +1934,16 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
if (IS_VF(p_hwfn->p_dev))
return;
+ /* If RoCE info is allocated, it means RoCE is initialized and should
+ * be enabled in the searcher.
+ */
+ if (p_hwfn->p_rdma_info) {
+ if (p_hwfn->b_rdma_enabled_in_prs)
+ ecore_wr(p_hwfn, p_ptt,
+ p_hwfn->rdma_prs_search_reg, 0x1);
+ ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x1);
+ }
+
/* Re-open incoming traffic */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@@ -1673,15 +1994,10 @@ enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
/* Disable PF in HW blocks */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- TCFC_REG_STRONG_ENABLE_PF, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- CCFC_REG_STRONG_ENABLE_PF, 0);
if (p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
- "Recovery is in progress -> skip "
- "sending unload_req/done\n");
+ "Recovery is in progress -> skip sending unload_req/done\n");
break;
}
@@ -1722,10 +2038,25 @@ static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
{
/* clear indirect access */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ if (ECORE_IS_AH(p_hwfn->p_dev)) {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0, 0);
+ } else {
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ }
/* Clean Previous errors if such exist */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -1740,12 +2071,11 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
{
/* ME Register */
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
- PXP_PF_ME_OPAQUE_ADDR);
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
/* Bits 16-19 from the ME registers are the pf_num */
- /* @@ @TBD - check, may be wrong after B0 implementation for CMT */
p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
PXP_CONCRETE_FID_PFID);
@@ -1769,59 +2099,285 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
+ "#PF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
feat_num[ECORE_PF_L2_QUE],
+ feat_num[ECORE_RDMA_CNQ],
RESC_NUM(p_hwfn, ECORE_SB), num_features);
}
-/* @@@TBD MK RESC: This info is currently hard code and set as if we were MF
- * need to read it from shmem...
- */
-static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
+static enum resource_id_enum
+ecore_hw_get_mfw_res_id(enum ecore_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case ECORE_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case ECORE_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case ECORE_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case ECORE_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case ECORE_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case ECORE_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case ECORE_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case ECORE_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+static u32 ecore_hw_get_dflt_resc_num(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id)
{
- u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
- u32 *resc_num = p_hwfn->hw_info.resc_num;
- int i, max_vf_vlan_filters;
- struct ecore_sb_cnt_info sb_cnt_info;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ struct ecore_sb_cnt_info sb_cnt_info;
+ u32 dflt_resc_num = 0;
- OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ switch (res_id) {
+ case ECORE_SB:
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ dflt_resc_num = sb_cnt_info.sb_cnt;
+ break;
+ case ECORE_L2_QUEUE:
+ dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ MAX_NUM_L2_QUEUES_BB) / num_funcs;
+ break;
+ case ECORE_VPORT:
+ dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ break;
+ case ECORE_RSS_ENG:
+ dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ ETH_RSS_ENGINE_NUM_BB) / num_funcs;
+ break;
+ case ECORE_PQ:
+ dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ MAX_QM_TX_QUEUES_BB) / num_funcs;
+ break;
+ case ECORE_RL:
+ dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ break;
+ case ECORE_ILT:
+ dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ break;
+ case ECORE_LL2_QUEUE:
+ dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ /* @DPDK */
+ dflt_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ /* @DPDK */
+ dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ MAX_NUM_VPORTS_BB) / num_funcs;
+ break;
+ default:
+ break;
+ }
-#ifdef CONFIG_ECORE_SRIOV
- max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;
-#else
- max_vf_vlan_filters = 0;
+ return dflt_resc_num;
+}
+
+static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id,
+ bool drv_resc_alloc)
+{
+ u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
+ u32 *p_resc_num, *p_resc_start;
+ struct resource_info resc_info;
+ enum _ecore_status_t rc;
+
+ p_resc_num = &RESC_NUM(p_hwfn, res_id);
+ p_resc_start = &RESC_START(p_hwfn, res_id);
+
+ dflt_resc_num = ecore_hw_get_dflt_resc_num(p_hwfn, res_id);
+ if (!dflt_resc_num) {
+ DP_ERR(p_hwfn, "Failed to get default amount for resource %d\n",
+ res_id);
+ return ECORE_INVAL;
+ }
+ dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
#endif
- ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- resc_num[ECORE_SB] = OSAL_MIN_T(u32,
- (MAX_SB_PER_PATH_BB / num_funcs),
- sb_cnt_info.sb_cnt);
+ OSAL_MEM_ZERO(&resc_info, sizeof(resc_info));
+ resc_info.res_id = ecore_hw_get_mfw_res_id(res_id);
+ if (resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d with MFW resources\n",
+ res_id);
+ return ECORE_INVAL;
+ }
- resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
- MAX_NUM_L2_QUEUES_BB) / num_funcs;
- resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 :
- MAX_NUM_VPORTS_BB) / num_funcs;
- resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
- ETH_RSS_ENGINE_NUM_BB) / num_funcs;
- resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 :
- MAX_QM_TX_QUEUES_BB) / num_funcs;
- resc_num[ECORE_RL] = 8;
- resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
- resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS -
- max_vf_vlan_filters +
- 1 /*For vlan0 */) / num_funcs;
-
- /* TODO - there will be a problem in AH - there are only 11k lines */
- resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
- PXP_NUM_ILT_RECORDS_BB) / num_funcs;
+ rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
+ &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW resp failure for a resc alloc req [res_id %d]\n",
+ res_id);
+ return rc;
+ }
+
+ /* Default driver values are applied in the following cases:
+ * - The resource allocation MB command is not supported by the MFW
+ * - There is an internal error in the MFW while processing the request
+ * - The resource ID is unknown to the MFW
+ */
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
+ mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
+ /* @DPDK */
+ DP_INFO(p_hwfn,
+ "No allocation info for resc %d [mcp_resp 0x%x].",
+ res_id, mcp_resp);
+ DP_INFO(p_hwfn,
+ "Applying default values [num %d, start %d].\n",
+ dflt_resc_num, dflt_resc_start);
+
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+
+ /* TBD - remove this when revising the handling of the SB resource */
+ if (res_id == ECORE_SB) {
+ /* Excluding the slowpath SB */
+ resc_info.size -= 1;
+ resc_info.offset -= p_hwfn->enabled_func_idx;
+ }
+
+ *p_resc_num = resc_info.size;
+ *p_resc_start = resc_info.offset;
+
+ if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
+ DP_NOTICE(p_hwfn, false,
+ "Resource %d: MFW allocation [num %d, start %d]",
+ res_id, *p_resc_num, *p_resc_start);
+ DP_NOTICE(p_hwfn, false,
+ "differs from default values [num %d, start %d]%s\n",
+ dflt_resc_num,
+ dflt_resc_start,
+ drv_resc_alloc ? " - applying default values" : "");
+ if (drv_resc_alloc) {
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ }
+ }
+ out:
+ return ECORE_SUCCESS;
+}
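/* Editorial worked example (not part of the patch) for the default split and
 * MFW negotiation above, assuming a BB device with 8 enabled PFs and a
 * hypothetical MAX_NUM_L2_QUEUES_BB of 128:
 *   dflt_resc_num   = 128 / 8 = 16 L2 queues per PF
 *   dflt_resc_start = 16 * enabled_func_idx
 * If the MFW answers the RESOURCE_ALLOC request with different numbers, those
 * are used instead, unless drv_resc_alloc forces the defaults back in.
 */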
+
+static const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
+{
+ switch (res_id) {
+ case ECORE_SB:
+ return "SB";
+ case ECORE_L2_QUEUE:
+ return "L2_QUEUE";
+ case ECORE_VPORT:
+ return "VPORT";
+ case ECORE_RSS_ENG:
+ return "RSS_ENG";
+ case ECORE_PQ:
+ return "PQ";
+ case ECORE_RL:
+ return "RL";
+ case ECORE_MAC:
+ return "MAC";
+ case ECORE_VLAN:
+ return "VLAN";
+ case ECORE_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
+ case ECORE_ILT:
+ return "ILT";
+ case ECORE_LL2_QUEUE:
+ return "LL2_QUEUE";
+ case ECORE_CMDQS_CQS:
+ return "CMDQS_CQS";
+ case ECORE_RDMA_STATS_QUEUE:
+ return "RDMA_STATS_QUEUE";
+ default:
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
+{
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ enum _ecore_status_t rc;
+ u8 res_id;
+#ifndef ASIC_ONLY
+ u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u32 *resc_num = p_hwfn->hw_info.resc_num;
+ /* For AH, an equal share of the ILT lines between the maximal number of
+ * PFs is not enough for RoCE. This would be solved by the future
+ * resource allocation scheme, but isn't currently present for
+ * FPGA/emulation. For now we keep a number that is sufficient for RoCE
+ * to work - the BB number of ILT lines divided by its max PFs number.
+ */
+ u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
+#endif
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ rc = ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
/* Reduced build contains less PQs */
- if (!(p_hwfn->p_dev->b_is_emul_full))
+ if (!(p_hwfn->p_dev->b_is_emul_full)) {
resc_num[ECORE_PQ] = 32;
+ resc_start[ECORE_PQ] = resc_num[ECORE_PQ] *
+ p_hwfn->enabled_func_idx;
+ }
/* For AH emulation, since we have a possible maximal number of
* 16 enabled PFs, in case there are not enough ILT lines -
@@ -1829,19 +2385,17 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
* only with less ILT lines.
*/
if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
- resc_num[ECORE_ILT] = resc_num[ECORE_ILT];
+ resc_num[ECORE_ILT] = OSAL_MAX_T(u32,
+ resc_num[ECORE_ILT],
+ roce_min_ilt_lines);
}
-#endif
- for (i = 0; i < ECORE_MAX_RESC; i++)
- resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
-
-#ifndef ASIC_ONLY
/* Correct the common ILT calculation if PF0 has more */
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
p_hwfn->p_dev->b_is_emul_full &&
- p_hwfn->rel_pf_id && resc_num[ECORE_ILT])
- resc_start[ECORE_ILT] += resc_num[ECORE_ILT];
+ p_hwfn->rel_pf_id && resc_num[ECORE_ILT] < roce_min_ilt_lines)
+ resc_start[ECORE_ILT] += roce_min_ilt_lines -
+ resc_num[ECORE_ILT];
#endif
/* Sanity for ILT */
@@ -1858,29 +2412,12 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
ecore_hw_set_feat(p_hwfn);
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "The numbers for each resource are:\n"
- "SB = %d start = %d\n"
- "L2_QUEUE = %d start = %d\n"
- "VPORT = %d start = %d\n"
- "PQ = %d start = %d\n"
- "RL = %d start = %d\n"
- "MAC = %d start = %d\n"
- "VLAN = %d start = %d\n"
- "ILT = %d start = %d\n"
- "CMDQS_CQS = %d start = %d\n",
- RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB),
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
- RESC_START(p_hwfn, ECORE_L2_QUEUE),
- RESC_NUM(p_hwfn, ECORE_VPORT),
- RESC_START(p_hwfn, ECORE_VPORT),
- RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ),
- RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL),
- RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC),
- RESC_NUM(p_hwfn, ECORE_VLAN),
- RESC_START(p_hwfn, ECORE_VLAN),
- RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT),
- RESC_NUM(p_hwfn, ECORE_CMDQS_CQS),
- RESC_START(p_hwfn, ECORE_CMDQS_CQS));
+ "The numbers for each resource are:\n");
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "%s = %d start = %d\n",
+ ecore_hw_get_resc_name(res_id),
+ RESC_NUM(p_hwfn, res_id),
+ RESC_START(p_hwfn, res_id));
return ECORE_SUCCESS;
}
@@ -1889,19 +2426,20 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
- u32 port_cfg_addr, link_temp, device_capabilities;
+ u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
struct ecore_mcp_link_params *link;
/* Read global nvm_cfg address */
- u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
/* Verify MCP has initialized it */
- if (nvm_cfg_addr == 0) {
+ if (!nvm_cfg_addr) {
DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
return ECORE_INVAL;
}
- /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
+/* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */
+
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -1912,33 +2450,36 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X25G;
+ break;
default:
DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
core_cfg);
@@ -1981,13 +2522,18 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
link->speed.forced_speed = 50000;
break;
- case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
link->speed.forced_speed = 100000;
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
}
+ p_hwfn->mcp_info->link_capabilities.default_speed =
+ link->speed.forced_speed;
+ p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
+ link->speed.autoneg;
+
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
link->pause.autoneg = !!(link_temp &
@@ -1999,8 +2545,7 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
link->loopback_mode = 0;
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
- " AN: 0x%02x, PAUSE AN: 0x%02x\n",
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
link->speed.forced_speed, link->speed.advertised_speeds,
link->speed.autoneg, link->pause.autoneg);
@@ -2037,6 +2582,18 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
&p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
+ OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
+ OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
+ &p_hwfn->hw_info.device_capabilities);
return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
@@ -2044,52 +2601,69 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u8 num_funcs;
- u32 tmp, mask;
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
- num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2
- : MAX_NUM_PFS_BB;
+ num_funcs = ECORE_IS_AH(p_dev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
* Bits 1-15 are for functions 1-15, respectively, and their value is
* '0' only for enabled functions (function 0 always exists and
* enabled).
- * In case of CMT, only the "even" functions are enabled, and thus the
- * number of functions for both hwfns is learnt from the same bits.
+ * In case of CMT in BB, only the "even" functions are enabled, and thus
+ * the number of functions for both hwfns is learnt from the same bits.
*/
-
- tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
- if (tmp & 0x1) {
- if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) {
- num_funcs = 0;
- mask = 0xaaaa;
+ reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+ if (reg_function_hide & 0x1) {
+ if (ECORE_IS_BB(p_dev)) {
+ if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
} else {
num_funcs = 1;
- mask = 0x5554;
+ eng_mask = 0xfffe;
}
- tmp = (tmp ^ 0xffffffff) & mask;
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
while (tmp) {
if (tmp & 0x1)
num_funcs++;
tmp >>= 0x1;
}
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
}
p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_FPGA(p_dev)) {
DP_NOTICE(p_hwfn, false,
- "FPGA: Limit number of PFs to 4 [would affect"
- " resource allocation, needed for IOV]\n");
+ "FPGA: Limit number of PFs to 4 [would affect resource allocation, needed for IOV]\n");
p_hwfn->num_funcs_on_engine = 4;
}
#endif
- DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n",
- p_hwfn->num_funcs_on_engine);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id, p_hwfn->abs_pf_id,
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}
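
For reference, the sketch below reproduces the FUNCTION_HIDE bookkeeping above outside the driver; it mirrors the AH / single-engine case (num_funcs starts at 1), and the register value, engine mask and PF id are made-up inputs, not values read from real hardware:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for ecore_get_num_funcs(): a '1' bit in
 * reg_function_hide means the function is hidden, and function 0
 * always exists.
 */
static void count_funcs(uint32_t reg_function_hide, uint32_t eng_mask,
			uint8_t abs_pf_id)
{
	int num_funcs = 1, enabled_func_idx = abs_pf_id;
	uint32_t tmp;

	/* Count the enabled functions on the engine (the inverted bits) */
	tmp = ~reg_function_hide & eng_mask;
	while (tmp) {
		if (tmp & 0x1)
			num_funcs++;
		tmp >>= 1;
	}

	/* Our index drops by one for every hidden function below us */
	tmp = reg_function_hide & eng_mask & ((1u << abs_pf_id) - 1);
	while (tmp) {
		if (tmp & 0x1)
			enabled_func_idx--;
		tmp >>= 1;
	}

	printf("num_funcs=%d enabled_func_idx=%d\n",
	       num_funcs, enabled_func_idx);
}

int main(void)
{
	/* hypothetical: functions 2 and 5 hidden, AH-style engine mask */
	count_funcs(0x1u | (1u << 2) | (1u << 5), 0xfffe, 4);
	return 0;
}
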
static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
@@ -2131,12 +2705,32 @@ static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dev->num_ports_in_engines = 0;
- for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
- port = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
- if (port & 1)
- p_hwfn->p_dev->num_ports_in_engines++;
- }
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
+ switch ((port & 0xf000) >> 12) {
+ case 1:
+ p_hwfn->p_dev->num_ports_in_engines = 1;
+ break;
+ case 3:
+ p_hwfn->p_dev->num_ports_in_engines = 2;
+ break;
+ case 0xf:
+ p_hwfn->p_dev->num_ports_in_engines = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unknown port mode in ECO_RESERVED %08x\n",
+ port);
+ }
+ } else
+#endif
+ for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
+ port = ecore_rd(p_hwfn, p_ptt,
+ CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ if (port & 1)
+ p_hwfn->p_dev->num_ports_in_engines++;
+ }
}
static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
@@ -2149,15 +2743,17 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
-ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- enum ecore_pci_personality personality)
+ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality personality, bool drv_resc_alloc)
{
enum _ecore_status_t rc;
- rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);
- if (rc)
- return rc;
+	/* Since all the information is common, only the first hwfn should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = ecore_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
/* TODO In get_hw_info, amoungst others:
* Get MCP FW revision and determine according to it the supported
@@ -2209,21 +2805,39 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
/* To overcome ILT lack for emulation, until at least until we'll have
* a definite answer from system about it, allow only PF0 to be RoCE.
*/
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
+ if (!p_hwfn->rel_pf_id)
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH_ROCE;
+ else
+ p_hwfn->hw_info.personality = ECORE_PCI_ETH;
+ }
#endif
+	/* Although in BB some constellations may support more than 4 TCs,
+	 * that can result in a performance penalty in some cases. 4
+	 * represents a good tradeoff between performance and flexibility.
+ */
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+
+	/* Start out with a single active TC. This can be increased either
+	 * by DCBX negotiation or by the upper layer driver.
+ */
+ p_hwfn->hw_info.num_active_tc = 1;
+
ecore_get_num_funcs(p_hwfn, p_ptt);
- /* Feat num is dependent on personality and on the number of functions
- * on the engine. Therefore it should be come after personality
- * initialization and after getting the number of functions.
+ /* In case of forcing the driver's default resource allocation, calling
+ * ecore_hw_get_resc() should come after initializing the personality
+ * and after getting the number of functions, since the calculation of
+ * the resources/features depends on them.
+ * This order is not harmful if not forcing.
*/
- return ecore_hw_get_resc(p_hwfn);
+ return ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
}
-/* @TMP - this should move to a proper .h */
-#define CHIP_NUM_AH 0x8070
+#define ECORE_DEV_ID_MASK 0xff00
+#define ECORE_DEV_ID_MASK_BB 0x1600
+#define ECORE_DEV_ID_MASK_AH 0x8000
static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
{
@@ -2236,19 +2850,19 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
&p_dev->device_id);
+ /* Determine type */
+ if ((p_dev->device_id & ECORE_DEV_ID_MASK) == ECORE_DEV_ID_MASK_AH)
+ p_dev->type = ECORE_DEV_TYPE_AH;
+ else
+ p_dev->type = ECORE_DEV_TYPE_BB;
+
p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CHIP_NUM);
p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
+ MISCS_REG_CHIP_REV);
MASK_FIELD(CHIP_REV, p_dev->chip_rev);
- /* Determine type */
- if (p_dev->device_id == CHIP_NUM_AH)
- p_dev->type = ECORE_DEV_TYPE_AH;
- else
- p_dev->type = ECORE_DEV_TYPE_BB;
-
/* Learn number of HW-functions */
tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
MISCS_REG_CMT_ENABLED_FOR_PAIR);
@@ -2275,11 +2889,10 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
MISCS_REG_CHIP_TEST_REG) >> 4;
MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
+ MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
DP_INFO(p_dev->hwfns,
- "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
- " Metal: %04x\n",
+ "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
CHIP_REV_IS_A0(p_dev) ? 0 : 1,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
@@ -2312,6 +2925,7 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
}
+#ifndef LINUX_REMOVE
void ecore_prepare_hibernate(struct ecore_dev *p_dev)
{
int j;
@@ -2327,26 +2941,31 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
p_hwfn->hw_init_done = false;
p_hwfn->first_on_engine = false;
+
+ ecore_ptt_invalidate(p_hwfn);
}
}
+#endif
static enum _ecore_status_t
-ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM *p_regview,
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
void OSAL_IOMEM *p_doorbells,
- enum ecore_pci_personality personality)
+ struct ecore_hw_prepare_params *p_params)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Split PCI bars evenly between hwfns */
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ if (IS_VF(p_dev))
+ return ecore_vf_hw_prepare(p_hwfn);
+
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
- "Reading the ME register returns all Fs;"
- " Preventing further chip access\n");
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
return ECORE_INVAL;
}
@@ -2364,7 +2983,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = ecore_get_dev_info(p_hwfn->p_dev);
+ rc = ecore_get_dev_info(p_dev);
if (rc != ECORE_SUCCESS)
goto err1;
}
@@ -2378,8 +2997,15 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
goto err1;
}
+ if (p_hwfn == ECORE_LEADING_HWFN(p_dev) && !p_dev->recov_in_prog) {
+ rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
+
/* Read the device configuration information from the HW and SHMEM */
- rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->personality, p_params->drv_resc_alloc);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
goto err2;
@@ -2392,7 +3018,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
goto err2;
}
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_FPGA(p_dev)) {
DP_NOTICE(p_hwfn, false,
"FPGA: workaround; Prevent DMAE parities\n");
ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
@@ -2405,35 +3031,38 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
#endif
return rc;
-err2:
+ err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_iov_free_hw_info(p_dev);
ecore_mcp_free(p_hwfn);
-err1:
+ err1:
ecore_hw_hwfn_free(p_hwfn);
-err0:
+ err0:
return rc;
}
-enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
+ struct ecore_hw_prepare_params *p_params)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc;
- if (IS_VF(p_dev))
- return ecore_vf_hw_prepare(p_dev);
+ p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
/* Store the precompiled init data ptrs */
- ecore_init_iro_array(p_dev);
+ if (IS_PF(p_dev))
+ ecore_init_iro_array(p_dev);
/* Initialize the first hwfn - will learn number of hwfns */
rc = ecore_hw_prepare_single(p_hwfn,
p_dev->regview,
- p_dev->doorbells, personality);
+ p_dev->doorbells, p_params);
if (rc != ECORE_SUCCESS)
return rc;
- personality = p_hwfn->hw_info.personality;
+ p_params->personality = p_hwfn->hw_info.personality;
- /* initialalize 2nd hwfn if necessary */
+	/* initialize 2nd hwfn if necessary */
if (p_dev->num_hwfns > 1) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
@@ -2449,15 +3078,20 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
- p_doorbell, personality);
+ p_doorbell, p_params);
/* in case of error, need to free the previously
- * initialiazed hwfn 0
+		 * initialized hwfn 0.
*/
if (rc != ECORE_SUCCESS) {
- ecore_init_free(p_hwfn);
- ecore_mcp_free(p_hwfn);
- ecore_hw_hwfn_free(p_hwfn);
+ if (IS_PF(p_dev)) {
+ ecore_init_free(p_hwfn);
+ ecore_mcp_free(p_hwfn);
+ ecore_hw_hwfn_free(p_hwfn);
+ } else {
+ DP_NOTICE(p_dev, true,
+ "What do we need to free when VF hwfn1 init fails\n");
+ }
return rc;
}
}
@@ -2483,6 +3117,8 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
}
+
+ ecore_iov_free_hw_info(p_dev);
}
static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
@@ -2551,7 +3187,7 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
p_chain->pbl.p_phys_table, pbl_size);
-out:
+ out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -2588,8 +3224,7 @@ ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
(cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
chain_size > ECORE_U32_MAX)) {
DP_NOTICE(p_dev, true,
- "The actual chain size (0x%lx) is larger than"
- " the maximal possible value\n",
+ "The actual chain size (0x%lx) is larger than the maximal possible value\n",
(unsigned long)chain_size);
return ECORE_INVAL;
}
@@ -2664,25 +3299,26 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
DP_NOTICE(p_dev, true,
- "Failed to allocate memory for the chain"
- " virtual addresses table\n");
+ "Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
+ * ecore_chain_init_pbl_mem() is called even in a case of an allocation
+ * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * should be saved to allow its freeing during the error flow.
*/
size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
if (!p_pbl_virt) {
DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
return ECORE_NOMEM;
}
- ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
- pp_virt_addr_tbl);
-
for (i = 0; i < page_cnt; i++) {
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
ECORE_CHAIN_PAGE_SIZE);
@@ -2728,14 +3364,13 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
if (rc) {
DP_NOTICE(p_dev, true,
"Cannot allocate a chain with the given arguments:\n"
- " [use_mode %d, mode %d, cnt_type %d, num_elems %d,"
- " elem_size %zu]\n",
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
intended_use, mode, cnt_type, num_elems, elem_size);
return rc;
}
ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
- mode, cnt_type);
+ mode, cnt_type, p_dev->dp_ctx);
switch (mode) {
case ECORE_CHAIN_MODE_NEXT_PTR:
@@ -2753,7 +3388,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
-nomem:
+ nomem:
ecore_chain_free(p_dev, p_chain);
return rc;
}
@@ -2767,8 +3402,7 @@ enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
DP_NOTICE(p_hwfn, true,
- "l2_queue id [%d] is not valid, available"
- " indices [%d - %d]\n",
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
src_id, min, max);
return ECORE_INVAL;
@@ -2788,8 +3422,7 @@ enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
DP_NOTICE(p_hwfn, true,
- "vport id [%d] is not valid, available"
- " indices [%d - %d]\n",
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
src_id, min, max);
return ECORE_INVAL;
@@ -2809,7 +3442,7 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
DP_NOTICE(p_hwfn, true,
- "rss_eng id [%d] is not valid,avail idx [%d - %d]\n",
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
src_id, min, max);
return ECORE_INVAL;
@@ -2908,9 +3541,12 @@ void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
"Tried to remove a non-configured filter\n");
}
-enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 filter)
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type)
{
u32 high, low, en;
int i;
@@ -2918,9 +3554,29 @@ enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return ECORE_SUCCESS;
- high = filter;
+ high = 0;
low = 0;
-
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
/* Find a free entry and utilize it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
en = ecore_rd(p_hwfn, p_ptt,
@@ -2937,7 +3593,7 @@ enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 1);
+ i * sizeof(u32), 1 << type);
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
break;
@@ -2945,17 +3601,52 @@ enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
DP_NOTICE(p_hwfn, false,
"Failed to find an empty LLH filter to utilize\n");
- return ECORE_INVAL;
+ return ECORE_NORESOURCES;
+ }
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ETH type %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP src port %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP src port %x is added at %d\n",
+ source_port_or_eth_type, i);
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP dst port %x is added at %d\n", dest_port, i);
+ break;
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP dst port %x is added at %d\n", dest_port, i);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "TCP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, i);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "UDP src/dst ports %x/%x are added at %d\n",
+ source_port_or_eth_type, dest_port, i);
+ break;
}
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "ETH type: %x is added at %d\n", filter, i);
-
return ECORE_SUCCESS;
}
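
As a side note, the high/low packing above can be checked with a standalone sketch; the port numbers are hypothetical and only illustrate the bit layout:

#include <stdint.h>
#include <stdio.h>

/* Illustration of the filter value layout used above: for the
 * SRC_AND_DEST port filters the source port occupies the upper 16 bits
 * of the 'low' register and the destination port the lower 16 bits,
 * while an ethertype filter uses the 'high' register instead.
 */
int main(void)
{
	uint16_t src_port = 4789, dst_port = 6081;	/* hypothetical ports */
	uint32_t high = 0, low;

	low = ((uint32_t)src_port << 16) | dst_port;
	printf("high=0x%08x low=0x%08x\n",
	       (unsigned int)high, (unsigned int)low);
	return 0;
}
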
-void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 filter)
+void
+ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type)
{
u32 high, low;
int i;
@@ -2963,11 +3654,41 @@ void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return;
- high = filter;
+ high = 0;
low = 0;
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return;
+ }
- /* Find the entry and clean it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
+ continue;
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
+ continue;
+ if (!(ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32)) & (1 << type)))
+ continue;
if (ecore_rd(p_hwfn, p_ptt,
NIG_REG_LLH_FUNC_FILTER_VALUE +
2 * i * sizeof(u32)) != low)
@@ -2980,6 +3701,11 @@ void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_FUNC_FILTER_VALUE +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
@@ -2987,6 +3713,7 @@ void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
(2 * i + 1) * sizeof(u32), 0);
break;
}
+
if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
DP_NOTICE(p_hwfn, false,
"Tried to remove a non-configured filter\n");
@@ -3012,97 +3739,30 @@ void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
}
}
-enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- u32 reg_tbl[] = {
- BRB_REG_HEADER_SIZE,
- BTB_REG_HEADER_SIZE,
- CAU_REG_LONG_TIMEOUT_THRESHOLD,
- CCFC_REG_ACTIVITY_COUNTER,
- CDU_REG_CID_ADDR_PARAMS,
- DBG_REG_CLIENT_ENABLE,
- DMAE_REG_INIT,
- DORQ_REG_IFEN,
- GRC_REG_TIMEOUT_EN,
- IGU_REG_BLOCK_CONFIGURATION,
- MCM_REG_INIT,
- MCP2_REG_DBG_DWORD_ENABLE,
- MISC_REG_PORT_MODE,
- MISCS_REG_CLK_100G_MODE,
- MSDM_REG_ENABLE_IN1,
- MSEM_REG_ENABLE_IN,
- NIG_REG_CM_HDR,
- NCSI_REG_CONFIG,
- PBF_REG_INIT,
- PTU_REG_ATC_INIT_ARRAY,
- PCM_REG_INIT,
- PGLUE_B_REG_ADMIN_PER_PF_REGION,
- PRM_REG_DISABLE_PRM,
- PRS_REG_SOFT_RST,
- PSDM_REG_ENABLE_IN1,
- PSEM_REG_ENABLE_IN,
- PSWRQ_REG_DBG_SELECT,
- PSWRQ2_REG_CDUT_P_SIZE,
- PSWHST_REG_DISCARD_INTERNAL_WRITES,
- PSWHST2_REG_DBGSYN_ALMOST_FULL_THR,
- PSWRD_REG_DBG_SELECT,
- PSWRD2_REG_CONF11,
- PSWWR_REG_USDM_FULL_TH,
- PSWWR2_REG_CDU_FULL_TH2,
- QM_REG_MAXPQSIZE_0,
- RSS_REG_RSS_INIT_EN,
- RDIF_REG_STOP_ON_ERROR,
- SRC_REG_SOFT_RST,
- TCFC_REG_ACTIVITY_COUNTER,
- TCM_REG_INIT,
- TM_REG_PXP_READ_DATA_FIFO_INIT,
- TSDM_REG_ENABLE_IN1,
- TSEM_REG_ENABLE_IN,
- TDIF_REG_STOP_ON_ERROR,
- UCM_REG_INIT,
- UMAC_REG_IPG_HD_BKP_CNTL_BB_B0,
- USDM_REG_ENABLE_IN1,
- USEM_REG_ENABLE_IN,
- XCM_REG_INIT,
- XSDM_REG_ENABLE_IN1,
- XSEM_REG_ENABLE_IN,
- YCM_REG_INIT,
- YSDM_REG_ENABLE_IN1,
- YSEM_REG_ENABLE_IN,
- XYLD_REG_SCBD_STRICT_PRIO,
- TMLD_REG_SCBD_STRICT_PRIO,
- MULD_REG_SCBD_STRICT_PRIO,
- YULD_REG_SCBD_STRICT_PRIO,
- };
- u32 test_val[] = { 0x0, 0x1 };
- u32 val, save_val, i, j;
-
- for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) {
- for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) {
- save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
- ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]);
- val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
- /* Restore the original register's value */
- ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val);
- if (val != test_val[i]) {
- DP_INFO(p_hwfn->p_dev,
- "offset 0x%x: val 0x%x != 0x%x\n",
- reg_tbl[j], val, test_val[i]);
- return ECORE_AGAIN;
- }
- }
+enum _ecore_status_t
+ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
+ 1 << p_hwfn->abs_pf_id / 2);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, 0);
+ return ECORE_SUCCESS;
}
- return ECORE_SUCCESS;
+
+ DP_NOTICE(p_hwfn, false,
+ "This function can't be set as default\n");
+ return ECORE_INVAL;
}
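
For illustration, the per-port default vector written above can be reproduced in isolation; the loop below only shows the bit arithmetic (1 << abs_pf_id / 2 on BB, where two PFs appear to map to one port) and uses made-up PF ids:

#include <stdio.h>

int main(void)
{
	unsigned int abs_pf_id;

	/* hypothetical PF ids 0..3: PFs 0/1 select port bit 0, PFs 2/3 bit 1 */
	for (abs_pf_id = 0; abs_pf_id < 4; abs_pf_id++)
		printf("abs_pf_id=%u -> NIG default PF vector=0x%x\n",
		       abs_pf_id, 1u << (abs_pf_id / 2));
	return 0;
}
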
static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 hw_addr, void *p_qzone,
- osal_size_t qzone_size,
+ u32 hw_addr, void *p_eth_qzone,
+ osal_size_t eth_qzone_size,
u8 timeset)
{
- struct coalescing_timeset *p_coalesce_timeset;
+ struct coalescing_timeset *p_coal_timeset;
if (IS_VF(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
@@ -3115,72 +3775,99 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
- OSAL_MEMSET(p_qzone, 0, qzone_size);
- p_coalesce_timeset = p_qzone;
- p_coalesce_timeset->timeset = timeset;
- p_coalesce_timeset->valid = 1;
- ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size);
+ OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
+ p_coal_timeset = p_eth_qzone;
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 coalesce, u8 qid)
+ u16 coalesce, u8 qid, u16 sb_id)
{
- struct ustorm_eth_queue_zone qzone;
+ struct ustorm_eth_queue_zone eth_qzone;
u16 fw_qid = 0;
u32 address;
- u8 timeset;
enum _ecore_status_t rc;
+ u8 timeset, timer_res;
+
+	/* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return ECORE_INVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
if (rc != ECORE_SUCCESS)
return rc;
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
- /* Translate the coalescing time into a timeset, according to:
- * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1)
- */
- timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1);
- rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
if (rc != ECORE_SUCCESS)
goto out;
p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
-out:
+ out:
return rc;
}
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 coalesce, u8 qid)
+ u16 coalesce, u8 qid, u16 sb_id)
{
- struct ystorm_eth_queue_zone qzone;
+ struct xstorm_eth_queue_zone eth_qzone;
u16 fw_qid = 0;
u32 address;
- u8 timeset;
enum _ecore_status_t rc;
+ u8 timeset, timer_res;
+
+	/* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return ECORE_INVAL;
+ }
+
+ timeset = (u8)(coalesce >> timer_res);
rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
if (rc != ECORE_SUCCESS)
return rc;
- address = BAR0_MAP_REG_YSDM_RAM + YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
- /* Translate the coalescing time into a timeset, according to:
- * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1)
- */
- timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ if (rc != ECORE_SUCCESS)
+ goto out;
- rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
- sizeof(struct ystorm_eth_queue_zone), timeset);
+ address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
if (rc != ECORE_SUCCESS)
goto out;
p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
-out:
+ out:
return rc;
}
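
To make the coalesce-to-timeset split used in both functions above concrete, here is a minimal standalone sketch; the sample values are arbitrary and the helper is not a driver function:

#include <stdint.h>
#include <stdio.h>

/* Illustration of coalesce == timeset << timer_res, with a 7-bit timeset:
 * larger coalesce values need a coarser timer resolution and therefore
 * lose precision (up to 3 usec at resolution 2), matching the driver's
 * 0x7F / 0xFF / 0x1FF thresholds.
 */
static int split_coalesce(uint16_t coalesce, uint8_t *timer_res,
			  uint8_t *timeset)
{
	if (coalesce <= 0x7F)
		*timer_res = 0;
	else if (coalesce <= 0xFF)
		*timer_res = 1;
	else if (coalesce <= 0x1FF)
		*timer_res = 2;
	else
		return -1;	/* out of range, rejected as in the driver */

	*timeset = (uint8_t)(coalesce >> *timer_res);
	return 0;
}

int main(void)
{
	uint16_t samples[] = { 64, 200, 500 };
	uint8_t res, set;
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		if (!split_coalesce(samples[i], &res, &set))
			printf("coalesce=%d -> timer_res=%d timeset=%d (~%d usec)\n",
			       samples[i], res, set, set << res);
	return 0;
}
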
@@ -3193,16 +3880,15 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
u32 min_pf_rate)
{
struct init_qm_vport_params *vport_params;
- int i, num_vports;
+ int i;
vport_params = p_hwfn->qm_info.qm_vport_params;
- num_vports = p_hwfn->qm_info.num_vports;
- for (i = 0; i < num_vports; i++) {
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
- vport_params[i].vport_wfq =
- (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate;
+ vport_params[i].vport_wfq = (wfq_speed * ECORE_WFQ_UNIT) /
+ min_pf_rate;
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
@@ -3212,16 +3898,10 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
static void
ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
{
- int i, num_vports;
- u32 min_speed;
-
- num_vports = p_hwfn->qm_info.num_vports;
- min_speed = min_pf_rate / num_vports;
+ int i;
- for (i = 0; i < num_vports; i++) {
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
- p_hwfn->qm_info.wfq_data[i].default_min_speed = min_speed;
- }
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
@@ -3229,12 +3909,11 @@ static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
u32 min_pf_rate)
{
struct init_qm_vport_params *vport_params;
- int i, num_vports;
+ int i;
vport_params = p_hwfn->qm_info.qm_vport_params;
- num_vports = p_hwfn->qm_info.num_vports;
- for (i = 0; i < num_vports; i++) {
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
@@ -3242,7 +3921,13 @@ static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
}
}
-/* validate wfq for a given vport and required min rate */
+/* This function performs several validations for WFQ
+ * configuration and the required min rate for a given vport:
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause the rates of other vports [not configured for
+ *    WFQ explicitly] to drop below one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
u16 vport_id, u32 req_rate,
u32 min_pf_rate)
@@ -3252,7 +3937,8 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
num_vports = p_hwfn->qm_info.num_vports;
- /* Check pre-set data for some of the vports */
+/* Accounting for the vports which are configured for WFQ explicitly */
+
for (i = 0; i < num_vports; i++) {
u32 tmp_speed;
@@ -3266,36 +3952,34 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
/* Include current vport data as well */
req_count++;
total_req_min_rate += req_rate;
- non_requested_count = p_hwfn->qm_info.num_vports - req_count;
+ non_requested_count = num_vports - req_count;
/* validate possible error cases */
if (req_rate > min_pf_rate) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Vport [%d] - Requested rate[%d Mbps] is greater"
- " than configured PF min rate[%d Mbps]\n",
+ "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
vport_id, req_rate, min_pf_rate);
return ECORE_INVAL;
}
- if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) {
+ if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Vport [%d] - Requested rate[%d Mbps] is less than"
- " one percent of configured PF min rate[%d Mbps]\n",
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
vport_id, req_rate, min_pf_rate);
return ECORE_INVAL;
}
/* TBD - for number of vports greater than 100 */
- if (ECORE_WFQ_UNIT / p_hwfn->qm_info.num_vports < 1) {
+ if (num_vports > ECORE_WFQ_UNIT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Number of vports are greater than 100\n");
+ "Number of vports is greater than %d\n",
+ ECORE_WFQ_UNIT);
return ECORE_INVAL;
}
if (total_req_min_rate > min_pf_rate) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Total requested min rate for all vports[%d Mbps]"
- "is greater than configured PF min rate[%d Mbps]\n",
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
total_req_min_rate, min_pf_rate);
return ECORE_INVAL;
}
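
The validation rules spelled out above amount to three simple comparisons; the standalone sketch below restates them with hypothetical rates (it is not driver code):

#include <stdint.h>
#include <stdio.h>

#define WFQ_UNIT 100	/* stands in for ECORE_WFQ_UNIT (percent granularity) */

/* Illustration of the WFQ sanity checks: the requested rate must not
 * exceed the PF min rate, must not fall below 1% of it, and the sum of
 * all requested rates must still fit within the PF min rate.
 */
static int wfq_request_ok(uint32_t req_rate, uint32_t total_req_min_rate,
			  uint32_t min_pf_rate)
{
	if (req_rate > min_pf_rate)
		return 0;
	if (req_rate < min_pf_rate / WFQ_UNIT)
		return 0;
	if (total_req_min_rate > min_pf_rate)
		return 0;
	return 1;
}

int main(void)
{
	/* hypothetical 10G PF min rate, 2G request on top of 6G already asked */
	printf("valid=%d\n", wfq_request_ok(2000, 6000 + 2000, 10000));
	return 0;
}
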
@@ -3305,8 +3989,12 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
left_rate_per_vp = total_left_rate / non_requested_count;
/* validate if non requested get < 1% of min bw */
- if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1)
+ if (left_rate_per_vp < min_pf_rate / ECORE_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
return ECORE_INVAL;
+ }
/* now req_rate for given vport passes all scenarios.
* assign final wfq rates to all vports.
@@ -3355,25 +4043,25 @@ static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 min_pf_rate)
{
- int rc = ECORE_SUCCESS;
bool use_wfq = false;
- u16 i, num_vports;
-
- num_vports = p_hwfn->qm_info.num_vports;
+ int rc = ECORE_SUCCESS;
+ u16 i;
/* Validate all pre configured vports for wfq */
- for (i = 0; i < num_vports; i++) {
- if (p_hwfn->qm_info.wfq_data[i].configured) {
- u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
-
- use_wfq = true;
- rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
- if (rc == ECORE_INVAL) {
- DP_NOTICE(p_hwfn, false,
- "Validation failed while"
- " configuring min rate\n");
- break;
- }
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "WFQ validation failed while configuring min rate\n");
+ break;
}
}
@@ -3396,7 +4084,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
/* TBD - for multiple hardware functions - that is 100 gig */
if (p_dev->num_hwfns > 1) {
DP_NOTICE(p_dev, false,
- "WFQ configuration is not supported for this dev\n");
+ "WFQ configuration is not supported for this device\n");
return rc;
}
@@ -3430,7 +4118,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
/* TBD - for multiple hardware functions - that is 100 gig */
if (p_dev->num_hwfns > 1) {
DP_VERBOSE(p_dev, ECORE_MSG_LINK,
- "WFQ configuration is not supported for this dev\n");
+ "WFQ configuration is not supported for this device\n");
return;
}
@@ -3452,12 +4140,21 @@ int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
- if (!p_link->line_speed)
+ if (!p_link->line_speed && (max_bw != 100))
return rc;
p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+ /* Since the limiter also affects Tx-switched traffic, we don't want it
+	 * to limit such traffic when there is no actual limit.
+	 * In that case, set the limit to an imaginary high boundary.
+ */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
- rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed);
+ rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Configured MAX bandwidth to be %08x Mb/sec\n",
@@ -3490,12 +4187,11 @@ int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
p_link, max_bw);
- if (rc != ECORE_SUCCESS) {
- ecore_ptt_release(p_hwfn, p_ptt);
- return rc;
- }
ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
}
return rc;
@@ -3509,6 +4205,7 @@ int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
int rc = ECORE_SUCCESS;
p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
if (!p_link->line_speed)
return rc;
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 535b82b5..042c0af2 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -13,8 +13,6 @@
#include "ecore_chain.h"
#include "ecore_int_api.h"
-struct ecore_tunn_start_params;
-
/**
* @brief ecore_init_dp - initialize the debug level
*
@@ -24,7 +22,9 @@ struct ecore_tunn_start_params;
* @param dp_ctx
*/
void ecore_init_dp(struct ecore_dev *p_dev,
- u32 dp_module, u8 dp_level, void *dp_ctx);
+ u32 dp_module,
+ u8 dp_level,
+ void *dp_ctx);
/**
* @brief ecore_init_struct - initialize the device structure to
@@ -57,26 +57,31 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
+struct ecore_hw_init_params {
+ /* tunnelling parameters */
+ struct ecore_tunn_start_params *p_tunn;
+ bool b_hw_start;
+ /* interrupt mode [msix, inta, etc.] to use */
+ enum ecore_int_mode int_mode;
+/* npar tx switching to be used for vports configured for tx-switching */
+
+ bool allow_npar_tx_switch;
+ /* binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+ /* the OS Epoch time in seconds */
+ u32 epoch;
+};
+
/**
* @brief ecore_hw_init -
*
* @param p_dev
- * @param p_tunn - tunneling parameters
- * @param b_hw_start
- * @param int_mode - interrupt mode [msix, inta, etc.] to use.
- * @param allow_npar_tx_switch - npar tx switching to be used
- * for vports configured for tx-switching.
- * @param bin_fw_data - binary fw data pointer in binary fw file.
- * Pass NULL if not using binary fw file.
+ * @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
- struct ecore_tunn_start_params *p_tunn,
- bool b_hw_start,
- enum ecore_int_mode int_mode,
- bool allow_npar_tx_switch,
- const u8 *bin_fw_data);
+ struct ecore_hw_init_params *p_params);
/**
* @brief ecore_hw_timers_stop_all -
@@ -98,14 +103,15 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_stop_fastpath -should be called incase
- * slowpath is still required for the device, but
- * fastpath is not.
+ * slowpath is still required for the device,
+ * but fastpath is not.
*
* @param p_dev
*
*/
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+#ifndef LINUX_REMOVE
/**
* @brief ecore_prepare_hibernate -should be called when
* the system is going into the hibernate state
@@ -114,6 +120,7 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
*
*/
void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+#endif
/**
* @brief ecore_hw_start_fastpath -restart fastpath traffic,
@@ -133,15 +140,25 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
*/
enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+struct ecore_hw_prepare_params {
+ /* personality to initialize */
+ int personality;
+ /* force the driver's default resource allocation */
+ bool drv_resc_alloc;
+ /* check the reg_fifo after any register access */
+ bool chk_reg_fifo;
+};
+
/**
* @brief ecore_hw_prepare -
*
* @param p_dev
- * @param personality - personality to initialize
+ * @param p_params
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality);
+enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
+ struct ecore_hw_prepare_params *p_params);
/**
* @brief ecore_hw_remove -
@@ -172,7 +189,8 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
*/
-void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats {
@@ -270,61 +288,66 @@ enum ecore_dmae_address_type_t {
#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
struct ecore_dmae_params {
- u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
+ u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
};
/**
-* @brief ecore_dmae_host2grc - copy data from source addr to
-* dmae registers using the given ptt
-*
-* @param p_hwfn
-* @param p_ptt
-* @param source_addr
-* @param grc_addr (dmae_data_offset)
-* @param size_in_dwords
-* @param flags (one of the flags defined above)
-*/
+ * @brief ecore_dmae_host2grc - copy data from source addr to
+ * dmae registers using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param grc_addr (dmae_data_offset)
+ * @param size_in_dwords
+ * @param flags (one of the flags defined above)
+ */
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 source_addr,
- u32 grc_addr, u32 size_in_dwords, u32 flags);
-
-/**
-* @brief ecore_dmae_grc2host - Read data from dmae data offset
-* to source address using the given ptt
-*
-* @param p_ptt
-* @param grc_addr (dmae_data_offset)
-* @param dest_addr
-* @param size_in_dwords
-* @param flags - one of the flags defined above
-*/
+ u32 grc_addr,
+ u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief ecore_dmae_grc2host - Read data from dmae data offset
+ * to source address using the given ptt
+ *
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags);
-
-/**
-* @brief ecore_dmae_host2host - copy data from to source address
-* to a destination address (for SRIOV) using the given ptt
-*
-* @param p_hwfn
-* @param p_ptt
-* @param source_addr
-* @param dest_addr
-* @param size_in_dwords
-* @param params
-*/
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief ecore_dmae_host2host - copy data from to source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
- u32 size_in_dwords, struct ecore_dmae_params *p_params);
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params);
/**
* @brief ecore_chain_alloc - Allocate and initialize a chain
@@ -344,7 +367,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems,
- osal_size_t elem_size, struct ecore_chain *p_chain);
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain);
/**
* @brief ecore_chain_free - Free chain DMA memory
@@ -352,7 +376,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
* @param p_hwfn
* @param p_chain
*/
-void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
+void ecore_chain_free(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain);
/**
* @@brief ecore_fw_l2_queue - Get absolute L2 queue ID
@@ -364,7 +389,8 @@ void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain);
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
- u16 src_id, u16 *dst_id);
+ u16 src_id,
+ u16 *dst_id);
/**
* @@brief ecore_fw_vport - Get absolute vport ID
@@ -376,7 +402,8 @@ enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id);
+ u8 src_id,
+ u8 *dst_id);
/**
* @@brief ecore_fw_rss_eng - Get absolute RSS engine ID
@@ -388,7 +415,8 @@ enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
- u8 src_id, u8 *dst_id);
+ u8 src_id,
+ u8 *dst_id);
/**
* @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
@@ -398,8 +426,8 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
* @param p_filter - MAC to add
*/
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter);
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
/**
* @brief ecore_llh_remove_mac_filter - removes a MAC filtre from llh
@@ -409,28 +437,50 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
* @param p_filter - MAC to remove
*/
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter);
+ struct ecore_ptt *p_ptt,
+ u8 *p_filter);
+
+enum ecore_llh_port_filter_type_t {
+ ECORE_LLH_FILTER_ETHERTYPE,
+ ECORE_LLH_FILTER_TCP_SRC_PORT,
+ ECORE_LLH_FILTER_TCP_DEST_PORT,
+ ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
+ ECORE_LLH_FILTER_UDP_SRC_PORT,
+ ECORE_LLH_FILTER_UDP_DEST_PORT,
+ ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
+};
/**
- * @brief ecore_llh_add_ethertype_filter - configures a ethertype filter in llh
+ * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
*
* @param p_hwfn
* @param p_ptt
- * @param filter - ethertype to add
+ * @param source_port_or_eth_type - source port or ethertype to add
+ * @param dest_port - destination port to add
+ * @param type - type of filter and comparison
*/
-enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 filter);
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type);
/**
- * @brief ecore_llh_remove_ethertype_filter - removes a ethertype llh filter
+ * @brief ecore_llh_remove_protocol_filter - remove a protocol filter in llh
*
* @param p_hwfn
* @param p_ptt
- * @param filter - ethertype to remove
+ * @param source_port_or_eth_type - source port or ethertype to remove
+ * @param dest_port - destination port to remove
+ * @param type - type of filter and comparison
*/
-void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 filter);
+void
+ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 source_port_or_eth_type,
+ u16 dest_port,
+ enum ecore_llh_port_filter_type_t type);
/**
* @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
@@ -439,59 +489,67 @@ void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
* @param p_ptt
*/
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+ struct ecore_ptt *p_ptt);
- /**
-*@brief Cleanup of previous driver remains prior to load
+/**
+ * @brief ecore_llh_set_function_as_default - set function as default per port
*
* @param p_hwfn
* @param p_ptt
- * @param id - For PF, engine-relative. For VF, PF-relative.
- * @param is_vf - true iff cleanup is made for a VF.
- *
- * @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 id, bool is_vf);
+enum _ecore_status_t
+ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
- * @brief ecore_test_registers - Perform register tests
+ * @brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
* @param p_ptt
+ * @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
*
- * @return enum _ecore_status_t
+ * @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 id,
+ bool is_vf);
/**
* @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ * Coalescing can be configured up to 511 usec, but with decreasing accuracy
+ * [the bigger the value the less accurate], up to an error of 3 usec for the
+ * highest values.
*
* @param p_hwfn
* @param p_ptt
* @param coalesce - Coalesce value in micro seconds.
* @param qid - Queue index.
+ * @param sb_id - SB Id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 coalesce, u8 qid);
+ u16 coalesce, u8 qid, u16 sb_id);
/**
* @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ * While the API allows setting coalescing per-qid, all Tx queues sharing an
+ * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff]; otherwise the configuration would break.
*
* @param p_hwfn
* @param p_ptt
* @param coalesce - Coalesce value in micro seconds.
* @param qid - Queue index.
+ * @param sb_id - SB Id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u8 coalesce, u8 qid);
+ u16 coalesce, u8 qid, u16 sb_id);
#endif
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index cc49fc7b..6395b7cd 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -10,33 +10,43 @@
#define GTT_REG_ADDR_H
/* Win 2 */
-#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
+/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
#endif
diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h
index f2efe24e..2ddc5f19 100644
--- a/drivers/net/qede/base/ecore_gtt_values.h
+++ b/drivers/net/qede/base/ecore_gtt_values.h
@@ -11,16 +11,16 @@
static u32 pxp_global_win[] = {
0,
0,
- 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
- 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
- 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
- 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
- 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
- 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
- 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
- 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
- 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
- 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
+ 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index e341b95c..179d410f 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -13,6 +13,7 @@
/********************************/
#include "common_hsi.h"
+
/*
* opcodes for the event ring
*/
@@ -25,10 +26,12 @@ enum common_event_opcode {
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
+
/*
* Common Ramrod Command IDs
*/
@@ -39,10 +42,12 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_VF_START /* VF Function Start */,
COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */,
COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */,
+ COMMON_RAMROD_RL_UPDATE /* QCN/DCQCN RL update Ramrod */,
COMMON_RAMROD_EMPTY /* Empty Ramrod */,
MAX_COMMON_RAMROD_CMD_ID
};
+
/*
* The core storm context for the Ystorm
*/
@@ -63,8 +68,8 @@ struct pstorm_core_conn_st_ctx {
struct xstorm_core_conn_st_ctx {
__le32 spq_base_lo /* SPQ Ring Base Address low dword */;
__le32 spq_base_hi /* SPQ Ring Base Address high dword */;
- struct regpair consolid_base_addr /* Consolidation Ring Base Address */
- ;
+/* Consolidation Ring Base Address */
+ struct regpair consolid_base_addr;
__le16 spq_cons /* SPQ Ring Consumer */;
__le16 consolid_cons /* Consolidation Ring Consumer */;
__le32 reserved0[55] /* Pad to 15 cycles */;
@@ -74,210 +79,300 @@ struct xstorm_core_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 core_state /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
+/* bit8 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
+/* timer0cf */
#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
+/* cf12 */
#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
+/* cf16 */
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+/* cf_array_cf */
#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+/* cf18 */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
+/* cf20 */
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
+/* cf2en */
#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
+/* cf10en */
#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+/* cf_array_cf_en */
#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
+/* cf18en */
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+/* rule0en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
+/* rule2en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
+/* rule10en */
#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
+/* rule18en */
#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
+/* bit16 */
#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+/* bit17 */
#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+/* bit18 */
#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+/* bit19 */
#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+/* bit20 */
#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+/* bit21 */
#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+/* cf23 */
#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
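Every *_MASK/*_SHIFT pair in this aggregative context describes a sub-field packed into one of the flags bytes: a value is stored by masking it and shifting it into position, and read back by shifting and masking. A hedged sketch; the mask/shift values are copied from flags7 above, while FIELD_SET/FIELD_GET are illustrative local helpers, not macros defined by this header:

#include <assert.h>
#include <stdint.h>

#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK  0x3
#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK     0x1
#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT    6

/* Illustrative helpers: write a sub-field into a flags byte and read it back */
#define FIELD_SET(byte, name, val) \
	((byte) = ((byte) & ~((name##_MASK) << (name##_SHIFT))) | \
		  (((val) & (name##_MASK)) << (name##_SHIFT)))
#define FIELD_GET(byte, name) \
	(((byte) >> (name##_SHIFT)) & (name##_MASK))

int main(void)
{
	uint8_t flags7 = 0;

	FIELD_SET(flags7, XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0, 2); /* cf20 = 2 */
	FIELD_SET(flags7, XSTORM_CORE_CONN_AG_CTX_CF0EN, 1);    /* enable cf0 */
	assert(FIELD_GET(flags7, XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0) == 2);
	assert(FIELD_GET(flags7, XSTORM_CORE_CONN_AG_CTX_CF0EN) == 1);
	return 0;
}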
@@ -337,84 +432,84 @@ struct tstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
@@ -441,58 +536,58 @@ struct ustorm_core_conn_ag_ctx {
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
@@ -524,27 +619,28 @@ struct ustorm_core_conn_st_ctx {
* core connection context
*/
struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context
- /* ystorm storm context */;
+/* ystorm storm context */
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2] /* padding */;
- struct pstorm_core_conn_st_ctx pstorm_st_context
- /* pstorm storm context */;
+/* pstorm storm context */
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2] /* padding */;
- struct xstorm_core_conn_st_ctx xstorm_st_context
- /* xstorm storm context */;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context
- /* xstorm aggregative context */;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context
- /* tstorm aggregative context */;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context
- /* ustorm aggregative context */;
- struct mstorm_core_conn_st_ctx mstorm_st_context
- /* mstorm storm context */;
- struct ustorm_core_conn_st_ctx ustorm_st_context
- /* ustorm storm context */;
+/* xstorm storm context */
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+/* xstorm aggregative context */
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+/* tstorm aggregative context */
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+/* ustorm aggregative context */
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+/* mstorm storm context */
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+/* ustorm storm context */
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2] /* padding */;
};
+
/*
* How ll2 should deal with packet upon errors
*/
@@ -555,6 +651,7 @@ enum core_error_handle {
MAX_CORE_ERROR_HANDLE
};
+
/*
* opcodes for the event ring
*/
@@ -566,17 +663,19 @@ enum core_event_opcode {
MAX_CORE_EVENT_OPCODE
};
+
/*
* The L4 pseudo checksum mode for Core
*/
enum core_l4_pseudo_checksum_mode {
- CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH
- ,
- CORE_L4_PSEUDO_CSUM_ZERO_LENGTH
- /* Pseudo Checksum on packet is calculated with zero length. */,
+/* Pseudo Checksum on packet is calculated with the correct packet length. */
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+/* Pseudo Checksum on packet is calculated with zero length. */
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+
/*
* Light-L2 RX Producers in Tstorm RAM
*/
@@ -587,24 +686,26 @@ struct core_ll2_port_stats {
struct regpair gsi_crcchksm_error;
};
+
/*
* Ethernet TX Per Queue Stats
*/
struct core_ll2_pstorm_per_queue_stat {
- struct regpair sent_ucast_bytes
- /* number of total bytes sent without errors */;
- struct regpair sent_mcast_bytes
- /* number of total bytes sent without errors */;
- struct regpair sent_bcast_bytes
- /* number of total bytes sent without errors */;
- struct regpair sent_ucast_pkts
- /* number of total packets sent without errors */;
- struct regpair sent_mcast_pkts
- /* number of total packets sent without errors */;
- struct regpair sent_bcast_pkts
- /* number of total packets sent without errors */;
+/* number of total bytes sent without errors */
+ struct regpair sent_ucast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_mcast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_bcast_bytes;
+/* number of total packets sent without errors */
+ struct regpair sent_ucast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_mcast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_bcast_pkts;
};
+
/*
* Light-L2 RX Producers in Tstorm RAM
*/
@@ -614,13 +715,15 @@ struct core_ll2_rx_prod {
__le32 reserved;
};
+
struct core_ll2_tstorm_per_queue_stat {
- struct regpair packet_too_big_discard
- /* Number of packets discarded because they are bigger than MTU */;
- struct regpair no_buff_discard
- /* Number of packets discarded due to lack of host buffers */;
+/* Number of packets discarded because they are bigger than MTU */
+ struct regpair packet_too_big_discard;
+/* Number of packets discarded due to lack of host buffers */
+ struct regpair no_buff_discard;
};
+
struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
@@ -630,6 +733,7 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+
/*
* Core Ramrod Command IDs (light L2)
*/
@@ -642,19 +746,33 @@ enum core_ramrod_cmd_id {
MAX_CORE_RAMROD_CMD_ID
};
+
+/*
+ * Core RX CQE Type for Light L2
+ */
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+
/*
 * Specifies how ll2 should deal with packet errors: packet_too_big and no_buff
*/
struct core_rx_action_on_error {
u8 error_type;
+/* ll2 how to handle error packet_too_big (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+/* ll2 how to handle error with no_buff (use enum core_error_handle) */
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
};
+
/*
* Core RX BD for Light L2
*/
@@ -663,6 +781,7 @@ struct core_rx_bd {
__le16 reserved[4];
};
+
/*
* Core RX CM offload BD for Light L2
*/
@@ -677,10 +796,12 @@ struct core_rx_bd_with_buff_len {
*/
union core_rx_bd_union {
struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */;
- struct core_rx_bd_with_buff_len rx_bd_with_len
- /* Core Rx Bd with dynamic buffer length */;
+/* Core Rx Bd with dynamic buffer length */
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
};
+
+
/*
* Opaque Data for Light L2 RX CQE .
*/
@@ -688,6 +809,7 @@ struct core_rx_cqe_opaque_data {
__le32 data[2] /* Opaque CQE Data */;
};
+
/*
* Core RX CQE Type for Light L2
*/
@@ -699,15 +821,16 @@ enum core_rx_cqe_type {
MAX_CORE_RX_CQE_TYPE
};
+
/*
* Core RX CQE for Light L2 .
*/
struct core_rx_fast_path_cqe {
u8 type /* CQE type */;
- u8 placement_offset
- /* Offset (in bytes) of the packet from start of the buffer */;
- struct parsing_and_err_flags parse_flags
- /* Parsing and error flags from the parser */;
+/* Offset (in bytes) of the packet from start of the buffer */
+ u8 placement_offset;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags parse_flags;
__le16 packet_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
@@ -720,8 +843,8 @@ struct core_rx_fast_path_cqe {
struct core_rx_gsi_offload_cqe {
u8 type /* CQE type */;
u8 data_length_error /* set if gsi data is bigger than buff */;
- struct parsing_and_err_flags parse_flags
- /* Parsing and error flags from the parser */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags parse_flags;
__le16 data_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
@@ -749,6 +872,10 @@ union core_rx_cqe_union {
struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */;
};
+
+
+
+
/*
* Ramrod data for rx queue start ramrod
*/
@@ -762,18 +889,28 @@ struct core_rx_start_ramrod_data {
u8 complete_event_flg /* post completion to the event ring if set */;
u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
- u8 inner_vlan_removal_en
- /* if set, 802.1q tags will be removed and copied to CQE */;
+/* if set, 802.1q tags will be removed and copied to CQE */
+ u8 inner_vlan_removal_en;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
+/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
+ * main_func_queue is set.
+ */
u8 mf_si_bcast_accept_all;
+/* Duplicate multicast packets to LL2 main queue in mf_si mode. Valid if
+ * main_func_queue is set.
+ */
u8 mf_si_mcast_accept_all;
+/* Specifies how ll2 should deal with packet errors: packet_too_big and
+ * no_buff
+ */
struct core_rx_action_on_error action_on_error;
- u8 gsi_offload_flag
- /* set when in GSI offload mode on ROCE connection */;
+/* set when in GSI offload mode on ROCE connection */
+ u8 gsi_offload_flag;
u8 reserved[7];
};
+
/*
* Ramrod data for rx queue stop ramrod
*/
@@ -785,25 +922,38 @@ struct core_rx_stop_ramrod_data {
__le16 reserved2[2];
};
+
/*
* Flags for Core TX BD
*/
struct core_tx_bd_flags {
u8 as_bitfield;
+/* Do not allow additional VLAN manipulations on this packet (DCB) */
#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+/* Insert VLAN into packet */
#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+/* This is the first BD of the packet (for debug) */
#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+/* Calculate the IP checksum for the packet */
#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+/* Calculate the L4 checksum for the packet */
#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+/* Packet is IPv6 with extensions */
#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
+ * 0-TCP, 1-UDP
+ */
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+/* The pseudo checksum mode to place in the L4 checksum field. Required only
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ */
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
};
@@ -814,12 +964,36 @@ struct core_tx_bd_flags {
struct core_tx_bd {
struct regpair addr /* Buffer Address */;
__le16 nbytes /* Number of Bytes in Buffer */;
- __le16 vlan /* VLAN to insert to packet (if insertion flag set) */;
- u8 nbds /* Number of BDs that make up one packet */;
+/* Network packets: VLAN to insert into the packet (if the insertion flag is
+ * set). LoopBack packets: echo data to pass to Rx
+ */
+ __le16 nw_vlan_or_lb_echo;
+ u8 bitfield0;
+/* Number of BDs that make up one packet - width wide enough to present
+ * X_CORE_LL2_NUM_OF_BDS_ON_ST_CT
+ */
+#define CORE_TX_BD_NBDS_MASK 0xF
+#define CORE_TX_BD_NBDS_SHIFT 0
+/* Use roce_flavor enum - differentiating between RoCE flavors is valid only
+ * when connType is ROCE (use enum core_roce_flavor_type)
+ */
+#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
+#define CORE_TX_BD_RESERVED0_MASK 0x7
+#define CORE_TX_BD_RESERVED0_SHIFT 5
struct core_tx_bd_flags bd_flags /* BD Flags */;
- __le16 l4_hdr_offset_w;
+ __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+/* Packet destination - Network, LB (use enum core_tx_dest) */
+#define CORE_TX_BD_TX_DST_MASK 0x1
+#define CORE_TX_BD_TX_DST_SHIFT 14
+#define CORE_TX_BD_RESERVED1_MASK 0x1
+#define CORE_TX_BD_RESERVED1_SHIFT 15
};
+
+
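With this change the TX BD packs its control information more tightly: the BD count and RoCE flavor now share bitfield0, and the L4 header offset shares bitfield1 with the TX destination bit. A hedged sketch of filling those fields; the mask/shift values are copied from core_tx_bd above, the numeric values are assumptions, and the little-endian conversion of bitfield1 is omitted for brevity:

#include <stdint.h>
#include <stdio.h>

#define CORE_TX_BD_NBDS_MASK             0xF
#define CORE_TX_BD_NBDS_SHIFT            0
#define CORE_TX_BD_ROCE_FLAV_MASK        0x1
#define CORE_TX_BD_ROCE_FLAV_SHIFT       4
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK  0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
#define CORE_TX_BD_TX_DST_MASK           0x1
#define CORE_TX_BD_TX_DST_SHIFT          14

int main(void)
{
	/* Example: a packet built from 2 BDs, plain RoCE flavor (0), L4 header
	 * at word offset 17, destined to the network (0). */
	uint8_t bitfield0 = 0;
	uint16_t bitfield1 = 0;

	bitfield0 |= (2 & CORE_TX_BD_NBDS_MASK) << CORE_TX_BD_NBDS_SHIFT;
	bitfield0 |= (0 & CORE_TX_BD_ROCE_FLAV_MASK) << CORE_TX_BD_ROCE_FLAV_SHIFT;
	bitfield1 |= (17 & CORE_TX_BD_L4_HDR_OFFSET_W_MASK)
		     << CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT;
	bitfield1 |= (0 & CORE_TX_BD_TX_DST_MASK) << CORE_TX_BD_TX_DST_SHIFT;

	printf("bitfield0=0x%02x bitfield1=0x%04x\n",
	       (unsigned int)bitfield0, (unsigned int)bitfield1);
	return 0;
}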
/*
* Light L2 TX Destination
*/
@@ -829,25 +1003,26 @@ enum core_tx_dest {
MAX_CORE_TX_DEST
};
+
/*
- * Ramrod data for rx queue start ramrod
+ * Ramrod data for tx queue start ramrod
*/
struct core_tx_start_ramrod_data {
struct regpair pbl_base_addr /* Address of the pbl page */;
__le16 mtu /* Maximum transmission unit */;
__le16 sb_id /* Status block ID */;
u8 sb_index /* Status block protocol index */;
- u8 tx_dest /* TX Destination (either Network or LB) */;
u8 stats_en /* Statistics Enable */;
u8 stats_id /* Statistics Counter ID */;
+ u8 conn_type /* connection type that loaded ll2 */;
__le16 pbl_size /* Number of BD pages pointed by PBL */;
__le16 qm_pq_id /* QM PQ ID */;
- u8 conn_type /* connection type that loaded ll2 */;
- u8 gsi_offload_flag
- /* set when in GSI offload mode on ROCE connection */;
- u8 resrved[2];
+/* set when in GSI offload mode on ROCE connection */
+ u8 gsi_offload_flag;
+ u8 resrved[3];
};
+
/*
* Ramrod data for tx queue stop ramrod
*/
@@ -855,49 +1030,133 @@ struct core_tx_stop_ramrod_data {
__le32 reserved0[2];
};
+
+/*
+ * Enum flag for what type of dcb data to update
+ */
+enum dcb_dhcp_update_flag {
+/* use when no change should be done to dcb data */
+ DONT_UPDATE_DCB_DHCP,
+ UPDATE_DCB /* use to update only l2 (vlan) priority */,
+ UPDATE_DSCP /* use to update only l3 dhcp */,
+ UPDATE_DCB_DSCP /* update vlan pri and dhcp */,
+ MAX_DCB_DHCP_UPDATE_FLAG
+};
+
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts /* Dropped GRE RX packets */;
+ struct regpair vxlan_discard_pkts /* Dropped VXLAN RX packets */;
+ struct regpair geneve_discard_pkts /* Dropped GENEVE RX packets */;
+ struct regpair lb_discard_pkts /* Dropped Tx switched packets */;
+};
+
+
struct eth_mstorm_per_queue_stat {
+/* Number of packets discarded because TTL=0 (in IPv4) or hopLimit=0 (IPv6) */
struct regpair ttl0_discard;
+/* Number of packets discarded because they are bigger than MTU */
struct regpair packet_too_big_discard;
+/* Number of packets discarded due to lack of host buffers (BDs/SGEs/CQEs) */
struct regpair no_buff_discard;
+/* Number of packets discarded because of no active Rx connection */
struct regpair not_active_discard;
+/* number of coalesced packets in all TPA aggregations */
struct regpair tpa_coalesced_pkts;
+/* total number of TPA aggregations */
struct regpair tpa_coalesced_events;
+/* number of aggregations, which abnormally ended */
struct regpair tpa_aborts_num;
+/* total TCP payload length in all TPA aggregations */
struct regpair tpa_coalesced_bytes;
};
+
+/*
+ * Ethernet TX Per PF
+ */
+struct eth_pstorm_per_pf_stat {
+/* number of total ucast bytes sent on loopback port without errors */
+ struct regpair sent_lb_ucast_bytes;
+/* number of total mcast bytes sent on loopback port without errors */
+ struct regpair sent_lb_mcast_bytes;
+/* number of total bcast bytes sent on loopback port without errors */
+ struct regpair sent_lb_bcast_bytes;
+/* number of total ucast packets sent on loopback port without errors */
+ struct regpair sent_lb_ucast_pkts;
+/* number of total mcast packets sent on loopback port without errors */
+ struct regpair sent_lb_mcast_pkts;
+/* number of total bcast packets sent on loopback port without errors */
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes /* Sent GRE bytes */;
+ struct regpair sent_vxlan_bytes /* Sent VXLAN bytes */;
+ struct regpair sent_geneve_bytes /* Sent GENEVE bytes */;
+ struct regpair sent_gre_pkts /* Sent GRE packets */;
+ struct regpair sent_vxlan_pkts /* Sent VXLAN packets */;
+ struct regpair sent_geneve_pkts /* Sent GENEVE packets */;
+ struct regpair gre_drop_pkts /* Dropped GRE TX packets */;
+ struct regpair vxlan_drop_pkts /* Dropped VXLAN TX packets */;
+ struct regpair geneve_drop_pkts /* Dropped GENEVE TX packets */;
+};
+
+
/*
* Ethernet TX Per Queue Stats
*/
struct eth_pstorm_per_queue_stat {
- struct regpair sent_ucast_bytes
- /* number of total bytes sent without errors */;
- struct regpair sent_mcast_bytes
- /* number of total bytes sent without errors */;
- struct regpair sent_bcast_bytes
- /* number of total bytes sent without errors */;
- struct regpair sent_ucast_pkts
- /* number of total packets sent without errors */;
- struct regpair sent_mcast_pkts
- /* number of total packets sent without errors */;
- struct regpair sent_bcast_pkts
- /* number of total packets sent without errors */;
- struct regpair error_drop_pkts
- /* number of total packets dropped due to errors */;
+/* number of total bytes sent without errors */
+ struct regpair sent_ucast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_mcast_bytes;
+/* number of total bytes sent without errors */
+ struct regpair sent_bcast_bytes;
+/* number of total packets sent without errors */
+ struct regpair sent_ucast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_mcast_pkts;
+/* number of total packets sent without errors */
+ struct regpair sent_bcast_pkts;
+/* number of total packets dropped due to errors */
+ struct regpair error_drop_pkts;
};
+
/*
* ETH Rx producers data
*/
struct eth_rx_rate_limit {
+/* Rate Limit Multiplier - (Storm Clock (MHz) * 8 / Desired Bandwidth (MB/s)) */
__le16 mult;
- __le16 cnst
- /* Constant term to add (or subtract from number of cycles) */;
+/* Constant term to add (or subtract from number of cycles) */
+ __le16 cnst;
u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
u8 reserved0;
__le16 reserved1;
};
+
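The mult comment above spells out the formula Storm clock (MHz) * 8 / desired bandwidth (MB/s). A quick worked computation with assumed example numbers, not values taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example: a 600 MHz storm clock and a 1200 MB/s target give
	 * mult = 600 * 8 / 1200 = 4. */
	uint32_t storm_clk_mhz = 600;
	uint32_t bandwidth_mb_s = 1200;
	uint16_t mult = (uint16_t)(storm_clk_mhz * 8 / bandwidth_mb_s);

	printf("eth_rx_rate_limit.mult = %u\n", (unsigned int)mult);
	return 0;
}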
+struct eth_ustorm_per_pf_stat {
+/* number of total ucast bytes received on loopback port without errors */
+ struct regpair rcv_lb_ucast_bytes;
+/* number of total mcast bytes received on loopback port without errors */
+ struct regpair rcv_lb_mcast_bytes;
+/* number of total bcast bytes received on loopback port without errors */
+ struct regpair rcv_lb_bcast_bytes;
+/* number of total ucast packets received on loopback port without errors */
+ struct regpair rcv_lb_ucast_pkts;
+/* number of total mcast packets received on loopback port without errors */
+ struct regpair rcv_lb_mcast_pkts;
+/* number of total bcast packets received on loopback port without errors */
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes /* Received GRE bytes */;
+ struct regpair rcv_vxlan_bytes /* Received VXLAN bytes */;
+ struct regpair rcv_geneve_bytes /* Received GENEVE bytes */;
+ struct regpair rcv_gre_pkts /* Received GRE packets */;
+ struct regpair rcv_vxlan_pkts /* Received VXLAN packets */;
+ struct regpair rcv_geneve_pkts /* Received GENEVE packets */;
+};
+
+
struct eth_ustorm_per_queue_stat {
struct regpair rcv_ucast_bytes;
struct regpair rcv_mcast_bytes;
@@ -907,6 +1166,7 @@ struct eth_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+
/*
* Event Ring Next Page Address
*/
@@ -920,10 +1180,12 @@ struct event_ring_next_addr {
*/
union event_ring_element {
struct event_ring_entry entry /* Event Ring Entry */;
- struct event_ring_next_addr next_addr /* Event Ring Next Page Address */
- ;
+/* Event Ring Next Page Address */
+ struct event_ring_next_addr next_addr;
};
+
+
/*
* Ports mode
*/
@@ -933,6 +1195,16 @@ enum fw_flow_ctrl_mode {
MAX_FW_FLOW_CTRL_MODE
};
+
+/*
+ * Major and Minor hsi Versions
+ */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2] /* Minor Version of hsi loading pf */;
+ u8 major_ver_arr[2] /* Major Version of driver loading pf */;
+};
+
+
/*
* Integration Phase
*/
@@ -943,57 +1215,82 @@ enum integ_phase {
MAX_INTEG_PHASE
};
+
+/*
+ * Ports mode
+ */
+enum iwarp_ll2_tx_queues {
+/* LL2 queue for OOO packets sent in-order by the driver */
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+/* LL2 queue for unaligned packets sent aligned by the driver */
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+ IWARP_LL2_ERROR /* Error indication */,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
+
/*
* Malicious VF error ID
*/
enum malicious_vf_error_id {
MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
- VF_PF_CHANNEL_NOT_READY
- /* Writing to VF/PF channel when it is not ready */,
+/* Writing to VF/PF channel when it is not ready */
+ VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
- ETH_PACKET_TOO_SMALL
- /* TX packet is shorter then reported on BDs or from minimal size */
- ,
- ETH_ILLEGAL_VLAN_MODE
- /* Tx packet with marked as insert VLAN when its illegal */,
+/* TX packet is shorter than reported on BDs or than the minimal size */
+ ETH_PACKET_TOO_SMALL,
+/* Tx packet marked as insert VLAN when it is illegal */
+ ETH_ILLEGAL_VLAN_MODE,
 ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
- ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
- ETH_VLAN_INSERT_AND_INBAND_VLAN /* Vlan cant be added to inband tag */,
- ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */
- ,
+/* TX packet has illegal inband tags marked */
+ ETH_ILLEGAL_INBAND_TAGS,
+/* VLAN cannot be added to inband tag */
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+/* indicated number of BDs for the packet is illegal */
+ ETH_ILLEGAL_NBDS,
ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
- ETH_INSUFFICIENT_BDS
- /* There are not enough BDs for transmission of even one packet */,
+/* There are not enough BDs for transmission of even one packet */
+ ETH_INSUFFICIENT_BDS,
ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
- ETH_ZERO_SIZE_BD
- /* empty BD (which not contains control flags) is illegal */,
+/* empty BD (which does not contain control flags) is illegal */
+ ETH_ZERO_SIZE_BD,
ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
- ETH_INSUFFICIENT_PAYLOAD
- ,
+/* In LSO it is expected that on the local BD ring there will be at least MSS
+ * bytes of data
+ */
+ ETH_INSUFFICIENT_PAYLOAD,
ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
- ETH_TUNN_IPV6_EXT_NBD_ERR
- /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
+/* Tunneled packet with IPv6+Ext without a proper number of BDs */
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
MAX_MALICIOUS_VF_ERROR_ID
};
+
+
/*
* Mstorm non-triggering VF zone
*/
struct mstorm_non_trigger_vf_zone {
- struct eth_mstorm_per_queue_stat eth_queue_stat
- /* VF statistic bucket */;
+/* VF statistic bucket */
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+/* VF RX queues producers */
+ struct eth_rx_prod_data
+ eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
};
+
/*
* Mstorm VF zone
*/
struct mstorm_vf_zone {
- struct mstorm_non_trigger_vf_zone non_trigger
- /* non-interrupt-triggering zone */;
+/* non-interrupt-triggering zone */
+ struct mstorm_non_trigger_vf_zone non_trigger;
};
+
/*
* personality per PF
*/
@@ -1009,25 +1306,27 @@ enum personality_type {
MAX_PERSONALITY_TYPE
};
+
/*
* tunnel configuration
*/
struct pf_start_tunnel_config {
- u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port. */;
- u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port. */
- ;
+/* Set VXLAN tunnel UDP destination port. */
+ u8 set_vxlan_udp_port_flg;
+/* Set GENEVE tunnel UDP destination port. */
+ u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
- u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
- ;
- u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
- ;
+/* If set, enable l2 GENEVE tunnel in TX path. */
+ u8 tx_enable_l2geneve;
+/* If set, enable IP GENEVE tunnel in TX path. */
+ u8 tx_enable_ipgeneve;
u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
- u8 tunnel_clss_l2geneve
- /* Classification scheme for l2 GENEVE tunnel. */;
- u8 tunnel_clss_ipgeneve
- /* Classification scheme for ip GENEVE tunnel. */;
+/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_l2geneve;
+/* Classification scheme for ip GENEVE tunnel. */
+ u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
@@ -1039,70 +1338,92 @@ struct pf_start_tunnel_config {
*/
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr /* Address of event ring PBL */;
- struct regpair consolid_q_pbl_addr
- /* PBL address of consolidation queue */;
- struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */
- ;
+/* PBL address of consolidation queue */
+ struct regpair consolid_q_pbl_addr;
+/* tunnel configuration. */
+ struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id /* Status block ID */;
+/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
- ;
u8 num_vfs /* Amount of vfs owned by PF */;
u8 event_ring_num_pages /* Number of PBL pages in event ring */;
u8 event_ring_sb_index /* Status block index */;
u8 path_id /* HW path ID (engine ID) */;
u8 warning_as_error /* In FW asserts, treat warning as error */;
- u8 dont_log_ramrods
- /* If not set - throw a warning for each ramrod (for debug) */;
+/* If not set - throw a warning for each ramrod (for debug) */
+ u8 dont_log_ramrods;
u8 personality /* define what type of personality is new PF */;
+/* Log type mask. Each bit set enables a corresponding event type logging.
+ * Event types are defined as ASSERT_LOG_TYPE_xxx
+ */
__le16 log_type_mask;
u8 mf_mode /* Multi function mode */;
u8 integ_phase /* Integration phase */;
+/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
u8 allow_npar_tx_switching;
+/* Map from inner to outer priority. Set pri_map_valid when init map */
u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid
- /* If inner_to_outer_pri_map is initialize then set pri_map_valid */
- ;
+/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
+ u8 pri_map_valid;
+/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
+ * (lower 16 bits) and ethType to use (higher 16 bits)
+ */
__le32 outer_tag;
- u8 reserved0[4];
+/* FP HSI version to be used by FW */
+ struct hsi_fp_ver_struct hsi_fp_ver;
};
+
+
/*
* Data for port update ramrod
*/
struct protocol_dcb_data {
u8 dcb_enable_flag /* dcbEnable flag value */;
+ u8 dscp_enable_flag /* If set use dscp value */;
u8 dcb_priority /* dcbPri flag value */;
u8 dcb_tc /* dcb TC value */;
- u8 reserved;
+ u8 dscp_val /* dscp value to write if dscp_enable_flag is set */;
+ u8 reserved0;
};
/*
- * tunnel configuration
+ * Update tunnel configuration
*/
struct pf_update_tunnel_config {
+/* Update RX per PF tunnel classification scheme. */
u8 update_rx_pf_clss;
+/* Update per PORT default tunnel RX classification scheme for traffic with
+ * unknown unicast outer MAC in NPAR mode.
+ */
+ u8 update_rx_def_ucast_clss;
+/* Update per PORT default tunnel RX classification scheme for traffic with non
+ * unicast outer MAC in NPAR mode.
+ */
+ u8 update_rx_def_non_ucast_clss;
+/* Update TX per PF tunnel classification scheme. used by pf update. */
u8 update_tx_pf_clss;
- u8 set_vxlan_udp_port_flg
- /* Update VXLAN tunnel UDP destination port. */;
- u8 set_geneve_udp_port_flg
- /* Update GENEVE tunnel UDP destination port. */;
+/* Update VXLAN tunnel UDP destination port. */
+ u8 set_vxlan_udp_port_flg;
+/* Update GENEVE tunnel UDP destination port. */
+ u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
- u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
- ;
- u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
- ;
+/* If set, enable l2 GENEVE tunnel in TX path. */
+ u8 tx_enable_l2geneve;
+/* If set, enable IP GENEVE tunnel in TX path. */
+ u8 tx_enable_ipgeneve;
u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
- u8 tunnel_clss_l2geneve
- /* Classification scheme for l2 GENEVE tunnel. */;
- u8 tunnel_clss_ipgeneve
- /* Classification scheme for ip GENEVE tunnel. */;
+/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_l2geneve;
+/* Classification scheme for ip GENEVE tunnel. */
+ u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
- __le16 reserved[3];
+ __le16 reserved[2];
};
/*
@@ -1114,22 +1435,27 @@ struct pf_update_ramrod_data {
u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
+/* Update RROCE (RoceV2) DCB data indication */
+ u8 update_rroce_dcb_data_flag;
u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
- u8 reserved;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
- struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */
- ;
+/* core iscsi related fields */
+ struct protocol_dcb_data iscsi_dcb_data;
struct protocol_dcb_data roce_dcb_data /* core roce related fields */;
- struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */
- ;
+/* core roce related fields */
+ struct protocol_dcb_data rroce_dcb_data;
+/* core iwarp related fields */
+ struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan /* new outer vlan id value */;
- __le16 reserved2;
- struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */
- ;
+ __le16 reserved;
+/* tunnel configuration. */
+ struct pf_update_tunnel_config tunnel_config;
};
+
+
/*
* Ports mode
*/
@@ -1142,6 +1468,19 @@ enum ports_mode {
MAX_PORTS_MODE
};
+
+
+/*
+ * use to index in hsi_fp_[major|minor]_ver_arr per protocol
+ */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
+
+
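ETH_VER_KEY and ROCE_VER_KEY index the per-protocol version arrays in hsi_fp_ver_struct, which pf_start_ramrod_data and vf_start_ramrod_data now carry. A hedged sketch of filling the Ethernet slot; the structure is re-declared locally and the version numbers are placeholders, not values from the patch:

#include <stdint.h>

enum { ETH_VER_KEY = 0, ROCE_VER_KEY };

/* Local mirror of hsi_fp_ver_struct above */
struct hsi_fp_ver_example {
	uint8_t minor_ver_arr[2]; /* minor version per protocol */
	uint8_t major_ver_arr[2]; /* major version per protocol */
};

int main(void)
{
	struct hsi_fp_ver_example ver = { {0, 0}, {0, 0} };

	ver.major_ver_arr[ETH_VER_KEY] = 3; /* placeholder version numbers */
	ver.minor_ver_arr[ETH_VER_KEY] = 0;
	(void)ver;
	return 0;
}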
/*
* RDMA TX Stats
*/
@@ -1154,20 +1493,22 @@ struct rdma_sent_stats {
* Pstorm non-triggering VF zone
*/
struct pstorm_non_trigger_vf_zone {
- struct eth_pstorm_per_queue_stat eth_queue_stat
- /* VF statistic bucket */;
+/* VF statistic bucket */
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
struct rdma_sent_stats rdma_stats /* RoCE sent statistics */;
};
+
/*
* Pstorm VF zone
*/
struct pstorm_vf_zone {
- struct pstorm_non_trigger_vf_zone non_trigger
- /* non-interrupt-triggering zone */;
+/* non-interrupt-triggering zone */
+ struct pstorm_non_trigger_vf_zone non_trigger;
 struct regpair reserved[7] /* vf_zone size must be a power of 2 */;
};
+
/*
* Ramrod Header of SPQE
*/
@@ -1178,6 +1519,7 @@ struct ramrod_header {
__le16 echo /* Ramrod echo */;
};
+
/*
* RDMA RX Stats
*/
@@ -1186,6 +1528,34 @@ struct rdma_rcv_stats {
struct regpair rcv_pkts /* number of total RDMA packets received */;
};
+
+
+/*
+ * Data for update QCN/DCQCN RL ramrod
+ */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg /* Update QCN global params: timeout. */;
+/* Update DCQCN global params: timeout, g, k. */
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg /* Init RL parameters, when RL disabled. */;
+ u8 rl_start_flg /* Start RL in IDLE state. Set rate to maximum. */;
+ u8 rl_stop_flg /* Stop RL. */;
+ u8 rl_id_first /* ID of first or single RL, that will be updated. */;
+/* ID of the last RL that will be updated. If clear, a single RL is updated. */
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg /* If set, RL will be used for DCQCN. */;
+ __le32 rl_bc_rate /* Byte Counter Limit. */;
+ __le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
+ __le16 rl_r_ai /* Active increase rate. */;
+ __le16 rl_r_hai /* Hyper active increase rate. */;
+ __le16 dcqcn_g /* DCQCN Alpha update gain in 1/64K resolution . */;
+ __le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
+ __le32 dcqcn_timeuot_us /* DCQCN timeout. */;
+ __le32 qcn_timeuot_us /* QCN timeout. */;
+ __le32 reserved[2];
+};
+
+
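rl_update_ramrod_data updates either one rate limiter (rl_id_last left clear) or the whole range rl_id_first..rl_id_last. A hedged sketch of preparing a single-RL DCQCN update; the structure is re-declared locally with host-endian integers and every numeric value is an assumption:

#include <stdint.h>
#include <string.h>

/* Local, simplified mirror of rl_update_ramrod_data above; a real driver
 * would use the header's definition and little-endian conversions. */
struct rl_update_example {
	uint8_t  rl_init_flg;
	uint8_t  rl_start_flg;
	uint8_t  rl_stop_flg;
	uint8_t  rl_id_first;
	uint8_t  rl_id_last;
	uint8_t  rl_dc_qcn_flg;
	uint32_t rl_bc_rate;
	uint16_t rl_max_rate;
};

int main(void)
{
	struct rl_update_example rl;

	memset(&rl, 0, sizeof(rl));
	rl.rl_init_flg = 1;   /* (re)initialise the rate limiter */
	rl.rl_start_flg = 1;  /* start in IDLE state at maximum rate */
	rl.rl_id_first = 3;   /* single RL: rl_id_last stays 0 */
	rl.rl_dc_qcn_flg = 1; /* this RL is used for DCQCN */
	rl.rl_max_rate = 625; /* 625 * 1.6 Mbps = 1 Gbps (assumed value) */
	(void)rl;
	return 0;
}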
/*
* Slowpath Element (SPQE)
*/
@@ -1194,6 +1564,7 @@ struct slow_path_element {
struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */;
};
+
/*
* Tstorm non-triggering VF zone
*/
@@ -1201,65 +1572,88 @@ struct tstorm_non_trigger_vf_zone {
struct rdma_rcv_stats rdma_stats /* RoCE received statistics */;
};
+
struct tstorm_per_port_stat {
- struct regpair trunc_error_discard
- /* packet is dropped because it was truncated in NIG */;
- struct regpair mac_error_discard
- /* packet is dropped because of Ethernet FCS error */;
- struct regpair mftag_filter_discard
- /* packet is dropped because classification was unsuccessful */;
+/* packet is dropped because it was truncated in NIG */
+ struct regpair trunc_error_discard;
+/* packet is dropped because of Ethernet FCS error */
+ struct regpair mac_error_discard;
+/* packet is dropped because classification was unsuccessful */
+ struct regpair mftag_filter_discard;
+/* packet was passed to Ethernet and dropped because of no mac filter match */
struct regpair eth_mac_filter_discard;
+/* packet passed to Light L2 and dropped because Light L2 is not configured for
+ * this PF
+ */
struct regpair ll2_mac_filter_discard;
+/* packet passed to Light L2 and dropped because Light L2 is not configured for
+ * this PF
+ */
struct regpair ll2_conn_disabled_discard;
- struct regpair iscsi_irregular_pkt
- /* packet is an ISCSI irregular packet */;
- struct regpair fcoe_irregular_pkt
- /* packet is an FCOE irregular packet */;
- struct regpair roce_irregular_pkt
- /* packet is an ROCE irregular packet */;
- struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */
- ;
- struct regpair toe_irregular_pkt /* packet is an TOE irregular packet */
- ;
- struct regpair preroce_irregular_pkt
- /* packet is an PREROCE irregular packet */;
+/* packet is an ISCSI irregular packet */
+ struct regpair iscsi_irregular_pkt;
+/* packet is an FCOE irregular packet */
+ struct regpair fcoe_irregular_pkt;
+/* packet is an ROCE irregular packet */
+ struct regpair roce_irregular_pkt;
+/* packet is an ETH irregular packet */
+ struct regpair eth_irregular_pkt;
+/* packet is an TOE irregular packet */
+ struct regpair toe_irregular_pkt;
+/* packet is an PREROCE irregular packet */
+ struct regpair preroce_irregular_pkt;
+ struct regpair eth_gre_tunn_filter_discard /* GRE dropped packets */;
+/* VXLAN dropped packets */
+ struct regpair eth_vxlan_tunn_filter_discard;
+/* GENEVE dropped packets */
+ struct regpair eth_geneve_tunn_filter_discard;
};
+
/*
* Tstorm VF zone
*/
struct tstorm_vf_zone {
- struct tstorm_non_trigger_vf_zone non_trigger
- /* non-interrupt-triggering zone */;
+/* non-interrupt-triggering zone */
+ struct tstorm_non_trigger_vf_zone non_trigger;
};
+
/*
* Tunnel classification scheme
*/
enum tunnel_clss {
- TUNNEL_CLSS_MAC_VLAN =
- 0
- /* Use MAC & VLAN from first L2 header for vport classification. */
- ,
- TUNNEL_CLSS_MAC_VNI
- ,
- TUNNEL_CLSS_INNER_MAC_VLAN
- /* Use MAC and VLAN from last L2 header for vport classification */
- ,
- TUNNEL_CLSS_INNER_MAC_VNI
- ,
+/* Use MAC and VLAN from first L2 header for vport classification. */
+ TUNNEL_CLSS_MAC_VLAN = 0,
+/* Use MAC from first L2 header and VNI from tunnel header for vport
+ * classification
+ */
+ TUNNEL_CLSS_MAC_VNI,
+/* Use MAC and VLAN from last L2 header for vport classification */
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+/* Use MAC from last L2 header and VNI from tunnel header for vport
+ * classification
+ */
+ TUNNEL_CLSS_INNER_MAC_VNI,
+/* Use MAC and VLAN from last L2 header for vport classification. If no exact
+ * match, use MAC and VLAN from first L2 header for classification.
+ */
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_TUNNEL_CLSS
};
+
+
/*
* Ustorm non-triggering VF zone
*/
struct ustorm_non_trigger_vf_zone {
- struct eth_ustorm_per_queue_stat eth_queue_stat
- /* VF statistic bucket */;
+/* VF statistic bucket */
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr /* VF-PF message address */;
};
+
/*
* Ustorm triggering VF zone
*/
@@ -1268,36 +1662,49 @@ struct ustorm_trigger_vf_zone {
u8 reserved[7];
};
+
/*
* Ustorm VF zone
*/
struct ustorm_vf_zone {
- struct ustorm_non_trigger_vf_zone non_trigger
- /* non-interrupt-triggering zone */;
+/* non-interrupt-triggering zone */
+ struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */;
};
+
/*
* VF-PF channel data
*/
struct vf_pf_channel_data {
+/* 0: VF-PF Channel NOT ready. Waiting for ack from PF driver. 1: VF-PF Channel
+ * is ready for a new transaction.
+ */
__le32 ready;
+/* 0: VF-PF Channel is invalid because of malicious VF. 1: VF-PF Channel is
+ * valid.
+ */
u8 valid;
u8 reserved0;
__le16 reserved1;
};
+
/*
* Ramrod data for VF start ramrod
*/
struct vf_start_ramrod_data {
u8 vf_id /* VF ID */;
+/* If set, initial cleanup ack will be sent to parent PF SP event queue */
u8 enable_flr_ack;
__le16 opaque_fid /* VF opaque FID */;
u8 personality /* define what type of personality is new VF */;
- u8 reserved[3];
+ u8 reserved[7];
+/* FP HSI version to be used by FW */
+ struct hsi_fp_ver_struct hsi_fp_ver;
};
+
/*
* Ramrod data for VF start ramrod
*/
@@ -1308,6 +1715,23 @@ struct vf_stop_ramrod_data {
__le32 reserved2;
};
+
+/*
+ * VF zone size mode.
+ */
+enum vf_zone_size_mode {
+/* Default VF zone size. Up to 192 VF supported. */
+ VF_ZONE_SIZE_MODE_DEFAULT,
+/* Doubled VF zone size. Up to 96 VF supported. */
+ VF_ZONE_SIZE_MODE_DOUBLE,
+/* Quad VF zone size. Up to 48 VF supported. */
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
+
+
+
/*
* Attentions status block
*/
@@ -1319,174 +1743,10 @@ struct atten_status_block {
__le32 reserved1;
};
-enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_PTU = 0x560000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
- MAX_BLOCK_ADDR
-};
-
-enum block_id {
- BLOCK_GRC,
- BLOCK_MISCS,
- BLOCK_MISC,
- BLOCK_DBU,
- BLOCK_PGLUE_B,
- BLOCK_CNIG,
- BLOCK_CPMU,
- BLOCK_NCSI,
- BLOCK_OPTE,
- BLOCK_BMB,
- BLOCK_PCIE,
- BLOCK_MCP,
- BLOCK_MCP2,
- BLOCK_PSWHST,
- BLOCK_PSWHST2,
- BLOCK_PSWRD,
- BLOCK_PSWRD2,
- BLOCK_PSWWR,
- BLOCK_PSWWR2,
- BLOCK_PSWRQ,
- BLOCK_PSWRQ2,
- BLOCK_PGLCS,
- BLOCK_DMAE,
- BLOCK_PTU,
- BLOCK_TCM,
- BLOCK_MCM,
- BLOCK_UCM,
- BLOCK_XCM,
- BLOCK_YCM,
- BLOCK_PCM,
- BLOCK_QM,
- BLOCK_TM,
- BLOCK_DORQ,
- BLOCK_BRB,
- BLOCK_SRC,
- BLOCK_PRS,
- BLOCK_TSDM,
- BLOCK_MSDM,
- BLOCK_USDM,
- BLOCK_XSDM,
- BLOCK_YSDM,
- BLOCK_PSDM,
- BLOCK_TSEM,
- BLOCK_MSEM,
- BLOCK_USEM,
- BLOCK_XSEM,
- BLOCK_YSEM,
- BLOCK_PSEM,
- BLOCK_RSS,
- BLOCK_TMLD,
- BLOCK_MULD,
- BLOCK_YULD,
- BLOCK_XYLD,
- BLOCK_PRM,
- BLOCK_PBF_PB1,
- BLOCK_PBF_PB2,
- BLOCK_RPB,
- BLOCK_BTB,
- BLOCK_PBF,
- BLOCK_RDIF,
- BLOCK_TDIF,
- BLOCK_CDU,
- BLOCK_CCFC,
- BLOCK_TCFC,
- BLOCK_IGU,
- BLOCK_CAU,
- BLOCK_UMAC,
- BLOCK_XMAC,
- BLOCK_DBG,
- BLOCK_NIG,
- BLOCK_WOL,
- BLOCK_BMBN,
- BLOCK_IPC,
- BLOCK_NWM,
- BLOCK_NWS,
- BLOCK_MS,
- BLOCK_PHY_PCIE,
- BLOCK_MISC_AEU,
- BLOCK_BAR0_MAP,
- MAX_BLOCK_ID
-};
/*
* Igu cleanup bit values to distinguish between clean or producer consumer
+ * update.
*/
enum command_type_bit {
IGU_COMMAND_TYPE_NOP = 0,
@@ -1494,61 +1754,100 @@ enum command_type_bit {
MAX_COMMAND_TYPE_BIT
};
+
/*
* DMAE command
*/
struct dmae_cmd {
__le32 opcode;
+/* DMA Source. 0 - PCIe, 1 - GRC (use enum dmae_cmd_src_enum) */
#define DMAE_CMD_SRC_MASK 0x1
#define DMAE_CMD_SRC_SHIFT 0
+/* DMA destination. 0 - None, 1 - PCIe, 2 - GRC, 3 - None
+ * (use enum dmae_cmd_dst_enum)
+ */
#define DMAE_CMD_DST_MASK 0x3
#define DMAE_CMD_DST_SHIFT 1
+/* Completion destination. 0 - PCIe, 1 - GRC (use enum dmae_cmd_c_dst_enum) */
#define DMAE_CMD_C_DST_MASK 0x1
#define DMAE_CMD_C_DST_SHIFT 3
+/* Reset the CRC result (do not use the previous result as the seed) */
#define DMAE_CMD_CRC_RESET_MASK 0x1
#define DMAE_CMD_CRC_RESET_SHIFT 4
+/* Reset the source address in the next go to the same source address of the
+ * previous go
+ */
#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+/* Reset the destination address in the next go to the same destination address
+ * of the previous go
+ */
#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+/* 0 - completion function is the same as src function, 1 - completion
+ * function is the same as dst function (use enum dmae_cmd_comp_func_enum)
+ */
#define DMAE_CMD_COMP_FUNC_MASK 0x1
#define DMAE_CMD_COMP_FUNC_SHIFT 7
+/* 0 - Do not write a completion word, 1 - Write a completion word
+ * (use enum dmae_cmd_comp_word_en_enum)
+ */
#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+/* 0 - Do not write a CRC word, 1 - Write a CRC word
+ * (use enum dmae_cmd_comp_crc_en_enum)
+ */
#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+/* The CRC word should be taken from the DMAE address space from address 9+X,
+ * where X is the value in these bits.
+ */
#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
#define DMAE_CMD_RESERVED1_MASK 0x1
#define DMAE_CMD_RESERVED1_SHIFT 13
#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+/* The field specifies how the completion word is affected by a PCIe read
+ * error. 0 - Send a regular completion, 1 - Send a completion with an error
+ * indication, 2 - Do not send a completion
+ * (use enum dmae_cmd_error_handling_enum)
+ */
#define DMAE_CMD_ERR_HANDLING_MASK 0x3
#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+/* The port ID to be placed on the RF FID field of the GRC bus. This field is
+ * used both when GRC is the destination and when it is the source of the DMAE
+ * transaction.
+ */
#define DMAE_CMD_PORT_ID_MASK 0x3
#define DMAE_CMD_PORT_ID_SHIFT 18
+/* Source PCI function number [3:0] */
#define DMAE_CMD_SRC_PF_ID_MASK 0xF
#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+/* Destination PCI function number [3:0] */
#define DMAE_CMD_DST_PF_ID_MASK 0xF
#define DMAE_CMD_DST_PF_ID_SHIFT 24
-#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 /* Source VFID valid */
#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
-#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 /* Destination VFID valid */
#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
#define DMAE_CMD_RESERVED2_MASK 0x3
#define DMAE_CMD_RESERVED2_SHIFT 30
- __le32 src_addr_lo
- /* PCIe source address low in bytes or GRC source address in DW */;
+/* PCIe source address low in bytes or GRC source address in DW */
+ __le32 src_addr_lo;
+/* PCIe source address high in bytes or reserved (if source is GRC) */
__le32 src_addr_hi;
+/* PCIe destination address low in bytes or GRC destination address in DW */
__le32 dst_addr_lo;
+/* PCIe destination address high in bytes or reserved (if destination is GRC) */
__le32 dst_addr_hi;
- __le16 length /* Length in DW */;
+ __le16 length_dw /* Length in DW */;
__le16 opcode_b;
-#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
#define DMAE_CMD_SRC_VF_ID_SHIFT 0
-#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
#define DMAE_CMD_DST_VF_ID_SHIFT 8
__le32 comp_addr_lo /* PCIe completion address low or grc address */;
+/* PCIe completion address high or reserved (if completion address is in GRC) */
__le32 comp_addr_hi;
__le32 comp_val /* Value to write to completion address */;
__le32 crc32 /* crc16 result */;
@@ -1561,43 +1860,65 @@ struct dmae_cmd {
__le16 xsum8 /* checksum8 result */;
};
-struct fw_ver_num {
- u8 major /* Firmware major version number */;
- u8 minor /* Firmware minor version number */;
- u8 rev /* Firmware revision version number */;
- u8 eng /* Firmware engineering version number (for bootleg versions) */
- ;
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled /* Do not write a CRC word */,
+ dmae_cmd_comp_crc_enabled /* Write a CRC word */,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
};
-struct fw_ver_info {
- __le16 tools_ver /* Tools version number */;
- u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
- u8 reserved1;
- struct fw_ver_num num /* FW version number */;
- __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
- __le32 reserved2;
+
+enum dmae_cmd_comp_func_enum {
+/* completion word and/or CRC will be sent to SRC-PCI function/SRC VFID */
+ dmae_cmd_comp_func_to_src,
+/* completion word and/or CRC will be sent to DST-PCI function/DST VFID */
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
};
-struct storm_ram_section {
- __le16 offset
- /* The offset of the section in the RAM (in 64 bit units) */;
- __le16 size /* The size of the section (in 64 bit units) */;
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled /* Do not write a completion word */,
+ dmae_cmd_comp_word_enabled /* Write the completion word */,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
};
-struct fw_info {
- struct fw_ver_info ver /* FW version information */;
- struct storm_ram_section fw_asserts_section
- /* The FW Asserts offset/size in Storm RAM */;
- __le32 reserved;
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
};
-struct fw_info_location {
- __le32 grc_addr /* GRC address where the fw_info struct is located. */;
- __le32 size
- /* Size of the fw_info structure (thats located at the grc_addr). */
- ;
+
+enum dmae_cmd_error_handling_enum {
+/* Send a regular completion (with no error indication) */
+ dmae_cmd_error_handling_send_regular_comp,
+/* Send a completion with an error indication (i.e. set bit 31 of the completion
+ * word)
+ */
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp /* Do not send a completion */,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie /* The source is the PCIe */,
+ dmae_cmd_src_grc /* The source is the GRC */,
+ MAX_DMAE_CMD_SRC_ENUM
};
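
As an illustration of how the DMAE_CMD_*_MASK/_SHIFT pairs and the dmae_cmd_*
enums above are meant to be combined, here is a minimal sketch (not part of
this patch; the SET_FIELD helper is hypothetical, only the DMAE_CMD_* macros
and enum values come from this header):

#include <stdint.h>

/* Hypothetical helper (not part of the header): place a value into the field
 * described by the corresponding _MASK/_SHIFT pair.
 */
#define SET_FIELD(word, name, val) \
	((word) |= (((uint32_t)(val) & name##_MASK) << name##_SHIFT))

static uint32_t build_dmae_opcode(void)
{
	uint32_t opcode = 0;

	SET_FIELD(opcode, DMAE_CMD_SRC, dmae_cmd_src_pcie);      /* source: PCIe */
	SET_FIELD(opcode, DMAE_CMD_DST, dmae_cmd_dst_grc);       /* destination: GRC */
	SET_FIELD(opcode, DMAE_CMD_C_DST, dmae_cmd_c_dst_pcie);  /* completion to PCIe */
	SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN,
		  dmae_cmd_comp_word_enabled);                   /* write a completion word */
	SET_FIELD(opcode, DMAE_CMD_PORT_ID, 0);                  /* port 0 on the GRC bus */
	return opcode;
}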
+
/*
* IGU cleanup command
*/
@@ -1605,15 +1926,18 @@ struct igu_cleanup {
__le32 sb_id_and_flags;
#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
#define IGU_CLEANUP_RESERVED0_SHIFT 0
+/* cleanup clear - 0, set - 1 */
#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+/* must always be set (use enum command_type_bit) */
#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
__le32 reserved1;
};
+
/*
* IGU firmware driver command
*/
@@ -1622,6 +1946,7 @@ union igu_command {
struct igu_cleanup cleanup;
};
+
/*
* IGU firmware driver command
*/
@@ -1632,10 +1957,12 @@ struct igu_command_reg_ctrl {
#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+/* command type: 0 - read, 1 - write */
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
};
+
/*
* IGU mapping line structure
*/
@@ -1645,9 +1972,10 @@ struct igu_mapping_line {
#define IGU_MAPPING_LINE_VALID_SHIFT 0
#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+/* In BB: VF-0-120, PF-0-7; In K2: VF-0-191, PF-0-15 */
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
-#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
@@ -1655,6 +1983,7 @@ struct igu_mapping_line {
#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
};
+
/*
* IGU MSIX line structure
*/
@@ -1672,65 +2001,37 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
-enum init_modes {
- MODE_BB_A0,
- MODE_BB_B0,
- MODE_K2,
- MODE_ASIC,
- MODE_EMUL_REDUCED,
- MODE_EMUL_FULL,
- MODE_FPGA,
- MODE_CHIPSIM,
- MODE_SF,
- MODE_MF_SD,
- MODE_MF_SI,
- MODE_PORTS_PER_ENG_1,
- MODE_PORTS_PER_ENG_2,
- MODE_PORTS_PER_ENG_4,
- MODE_100G,
- MODE_EAGLE_ENG1_WORKAROUND,
- MAX_INIT_MODES
-};
-
-enum init_phases {
- PHASE_ENGINE,
- PHASE_PORT,
- PHASE_PF,
- PHASE_VF,
- PHASE_QM_PF,
- MAX_INIT_PHASES
-};
struct mstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
@@ -1738,36 +2039,48 @@ struct mstorm_core_conn_ag_ctx {
__le32 reg1 /* reg1 */;
};
+
/*
* per encapsulation type enabling flags
*/
struct prs_reg_encapsulation_type_en {
u8 flags;
+/* Enable bit for Ethernet-over-GRE (L2 GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+/* Enable bit for IP-over-GRE (IP GRE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+/* Enable bit for VXLAN encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+/* Enable bit for T-Tag encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+/* Enable bit for Ethernet-over-GENEVE (L2 GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+/* Enable bit for IP-over-GENEVE (IP GENEVE) encapsulation. */
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
};
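
A minimal sketch of how the per-type enable bits above might be composed into
the flags byte (illustrative only, not part of this patch; only the
PRS_REG_ENCAPSULATION_TYPE_EN_* macros come from this header):

#include <stdint.h>

static uint8_t prs_encap_flags_vxlan_and_l2_gre(void)
{
	uint8_t flags = 0;

	/* enable VXLAN parsing */
	flags |= 1 << PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
	/* enable Ethernet-over-GRE (L2 GRE) parsing */
	flags |= 1 << PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
	return flags;
}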
+
enum pxp_tph_st_hint {
TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
- TPH_ST_HINT_TARGET
- /* Device Write and Host Read, or Host Write and Device Read */,
+/* Device Write and Host Read, or Host Write and Device Read */
+ TPH_ST_HINT_TARGET,
+/* Device Write and Host Read, or Host Write and Device Read - with temporal
+ * reuse
+ */
TPH_ST_HINT_TARGET_PRIO,
MAX_PXP_TPH_ST_HINT
};
+
/*
* QM hardware structure of enable bypass credit mask
*/
@@ -1791,6 +2104,7 @@ struct qm_rf_bypass_mask {
#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
};
+
/*
* QM hardware structure of opportunistic credit mask
*/
@@ -1818,83 +2132,95 @@ struct qm_rf_opportunistic_mask {
#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
};
+
/*
* QM hardware structure of QM map memory
*/
struct qm_rf_pq_map {
__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+/* the first PQ associated with the VPORT and VOQ of this PQ */
#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
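
To show how such a packed QM map entry is typically decoded, a minimal sketch
(not part of this patch; GET_FIELD is a hypothetical helper, only the
QM_RF_PQ_MAP_* macros come from this header):

#include <stdint.h>

/* Hypothetical helper: extract the field named by a _MASK/_SHIFT pair. */
#define GET_FIELD(word, name) \
	(((word) >> name##_SHIFT) & name##_MASK)

static void decode_pq_map(uint32_t reg)
{
	unsigned int vp_pq_id = GET_FIELD(reg, QM_RF_PQ_MAP_VP_PQ_ID); /* first PQ of VPORT/VOQ */
	unsigned int voq      = GET_FIELD(reg, QM_RF_PQ_MAP_VOQ);
	unsigned int rl_valid = GET_FIELD(reg, QM_RF_PQ_MAP_RL_VALID); /* rate limiter active? */

	(void)vp_pq_id; (void)voq; (void)rl_valid;
}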
+
/*
* Completion params for aggregated interrupt completion
*/
struct sdm_agg_int_comp_params {
__le16 params;
+/* the number of aggregated interrupt, 0-31 */
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+/* 1 - set a bit in aggregated vector, 0 - dont set */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+/* Bit number in the aggregated vector, 0-279 (TBD) */
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
};
+
/*
* SDM operation gen command (generate aggregative interrupt)
*/
struct sdm_op_gen {
__le32 command;
+/* completion parameters 0-15 */
#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
-#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
-#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
+
+
+
+
struct ystorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
new file mode 100644
index 00000000..e82b0d4c
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_DEBUG_TOOLS__
+#define __ECORE_HSI_DEBUG_TOOLS__
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+
+enum block_addr {
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
+ MAX_BLOCK_ADDR
+};
+
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_MISC_AEU,
+ BLOCK_BAR0_MAP,
+ MAX_BLOCK_ID
+};
+
+
+/*
+ * binary debug buffer types
+ */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE /* init modes tree */,
+ BIN_BUF_DBG_DUMP_REG /* GRC Dump registers */,
+ BIN_BUF_DBG_DUMP_MEM /* GRC Dump memories */,
+ BIN_BUF_DBG_IDLE_CHK_REGS /* Idle Check registers */,
+ BIN_BUF_DBG_IDLE_CHK_IMMS /* Idle Check immediates */,
+ BIN_BUF_DBG_IDLE_CHK_RULES /* Idle Check rules */,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA /* Idle Check parsing data */,
+ BIN_BUF_DBG_ATTN_BLOCKS /* Attention blocks */,
+ BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
+ BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
+ BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+
+/*
+ * Attention bit mapping
+ */
+struct dbg_attn_bit_mapping {
+ __le16 data;
+/* The index of an attention in the blocks attentions list
+ * (if is_unused_idx_cnt=0), or a number of consecutive unused attention bits
+ * (if is_unused_idx_cnt=1)
+ */
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+/* if set, the val field indicates the number of consecutive unused attention
+ * bits
+ */
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
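
A minimal sketch of how one of these mapping words might be interpreted,
following the comments above (illustrative only, not part of this patch; the
value is assumed to already be in host byte order):

#include <stdint.h>

static void decode_attn_bit_mapping(uint16_t data)
{
	unsigned int val = (data >> DBG_ATTN_BIT_MAPPING_VAL_SHIFT) &
			   DBG_ATTN_BIT_MAPPING_VAL_MASK;

	if ((data >> DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT) &
	    DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK) {
		/* val is a count of consecutive unused attention bits */
	} else {
		/* val is an index into the block's attentions list */
	}
	(void)val;
}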
+
+
+/*
+ * Attention block per-type data
+ */
+struct dbg_attn_block_type_data {
+/* Offset of this block attention names in the debug attention name offsets
+ * array
+ */
+ __le16 names_offset;
+ __le16 reserved1;
+ u8 num_regs /* Number of attention registers in this block */;
+ u8 reserved2;
+/* Offset of this blocks attention registers in the attention registers array
+ * (in dbg_attn_reg units)
+ */
+ __le16 regs_offset;
+};
+
+/*
+ * Block attentions
+ */
+struct dbg_attn_block {
+/* attention block per-type data. Count must match the number of elements in
+ * dbg_attn_type.
+ */
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+
+/*
+ * Attention register result
+ */
+struct dbg_attn_reg_result {
+ __le32 data;
+/* STS attention register GRC address (in dwords) */
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+/* Number of attention indexes in this register */
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+/* Offset of this registers block attention indexes (values in the range
+ * 0..number of block attentions)
+ */
+ __le16 attn_idx_offset;
+ __le16 reserved;
+ __le32 sts_val /* Value read from the STS attention register */;
+ __le32 mask_val /* Value read from the MASK attention register */;
+};
+
+/*
+ * Attention block result
+ */
+struct dbg_attn_block_result {
+ u8 block_id /* Registers block ID */;
+ u8 data;
+/* Value from dbg_attn_type enum */
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+/* Number of registers in the block in which at least one attention bit is set */
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+/* Offset of this registers block attention names in the attention name offsets
+ * array
+ */
+ __le16 names_offset;
+/* result data for each register in the block in which at least one attention
+ * bit is set
+ */
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+
+
+/*
+ * mode header
+ */
+struct dbg_mode_hdr {
+ __le16 data;
+/* indicates if a mode expression should be evaluated (0/1) */
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+/* offset (in bytes) in modes expression buffer. valid only if eval_mode is
+ * set.
+ */
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/*
+ * Attention register
+ */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode /* Mode header */;
+/* Offset of this registers block attention indexes (values in the range
+ * 0..number of block attentions)
+ */
+ __le16 attn_idx_offset;
+ __le32 data;
+/* STS attention register GRC address (in dwords) */
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+/* Number of attention indexes in this register */
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+/* STS_CLR attention register GRC address (in dwords) */
+ __le32 sts_clr_address;
+/* MASK attention register GRC address (in dwords) */
+ __le32 mask_address;
+};
+
+
+
+/*
+ * attention types
+ */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+
+/*
+ * condition header for registers dump
+ */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode /* Mode header */;
+ u8 block_id /* block ID */;
+ u8 data_size /* size in dwords of the data following this header */;
+};
+
+
+/*
+ * memory data for registers dump
+ */
+struct dbg_dump_mem {
+ __le32 dword0;
+/* register address (in dwords) */
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ __le32 dword1;
+/* register size (in dwords) */
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
+#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+};
+
+
+/*
+ * register data for registers dump
+ */
+struct dbg_dump_reg {
+ __le32 data;
+/* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+
+/*
+ * split header for registers dump
+ */
+struct dbg_dump_split_hdr {
+ __le32 hdr;
+/* size in dwords of the data following this header */
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF /* split type ID */
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+
+/*
+ * condition header for idle check
+ */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode /* Mode header */;
+/* size in dwords of the data following this header */
+ __le16 data_size;
+};
+
+
+/*
+ * Idle Check condition register
+ */
+struct dbg_idle_chk_cond_reg {
+ __le32 data;
+/* Register GRC address (in dwords) */
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+/* value from block_id enum */
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ __le16 num_entries /* number of registers entries to check */;
+ u8 entry_size /* size of registers entry (in dwords) */;
+ u8 start_entry /* index of the first entry to check */;
+};
+
+
+/*
+ * Idle Check info register
+ */
+struct dbg_idle_chk_info_reg {
+ __le32 data;
+/* Register GRC address (in dwords) */
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+/* value from block_id enum */
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ __le16 size /* register size in dwords */;
+ struct dbg_mode_hdr mode /* Mode header */;
+};
+
+
+/*
+ * Idle Check register
+ */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg /* condition register */;
+ struct dbg_idle_chk_info_reg info_reg /* info register */;
+};
+
+
+/*
+ * Idle Check result header
+ */
+struct dbg_idle_chk_result_hdr {
+ __le16 rule_id /* Failing rule index */;
+ __le16 mem_entry_id /* Failing memory entry index */;
+ u8 num_dumped_cond_regs /* number of dumped condition registers */;
+ u8 num_dumped_info_regs /* number of dumped info registers */;
+ u8 severity /* from dbg_idle_chk_severity_types enum */;
+ u8 reserved;
+};
+
+
+/*
+ * Idle Check result register header
+ */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+/* indicates if this register is a memory */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+/* register index within the failing rule */
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry /* index of the first checked entry */;
+ __le16 size /* register size in dwords */;
+};
+
+
+/*
+ * Idle Check rule
+ */
+struct dbg_idle_chk_rule {
+ __le16 rule_id /* Idle Check rule ID */;
+ u8 severity /* value from dbg_idle_chk_severity_types enum */;
+ u8 cond_id /* Condition ID */;
+ u8 num_cond_regs /* number of condition registers */;
+ u8 num_info_regs /* number of info registers */;
+ u8 num_imms /* number of immediates in the condition */;
+ u8 reserved1;
+/* offset of this rules registers in the idle check register array
+ * (in dbg_idle_chk_reg units)
+ */
+ __le16 reg_offset;
+/* offset of this rules immediate values in the immediate values array
+ * (in dwords)
+ */
+ __le16 imm_offset;
+};
+
+
+/*
+ * Idle Check rule parsing data
+ */
+struct dbg_idle_chk_rule_parsing_data {
+ __le32 data;
+/* indicates if this register has a FW message */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+/* Offset of this rules strings in the debug strings array (in bytes) */
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+
+/*
+ * idle check severity types
+ */
+enum dbg_idle_chk_severity_types {
+/* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+/* idle check failure should cause an error only if theres no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+/* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+
+
+/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block_data {
+/* Indicates if the block is enabled for recording (0/1) */
+ u8 enabled;
+ u8 hw_id /* HW ID associated with the block */;
+ u8 line_num /* Debug line number to select */;
+ u8 right_shift /* Number of units to right-shift the debug data (0-3) */;
+ u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
+/* 4-bit value: bit i set -> unit i is forced valid. */
+ u8 force_valid;
+/* 4-bit value: bit i set -> unit i frame bit is forced. */
+ u8 force_frame;
+ u8 reserved;
+};
+
+
+/*
+ * Debug Bus Clients
+ */
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+
+/*
+ * Debug Bus constraint operation types
+ */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ /* equal */,
+ DBG_BUS_CONSTRAINT_OP_NE /* not equal */,
+ DBG_BUS_CONSTRAINT_OP_LT /* less than */,
+ DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */,
+ DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GT /* greater than */,
+ DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */,
+ DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */,
+ DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+
+/*
+ * Debug Bus memory address
+ */
+struct dbg_bus_mem_addr {
+ __le32 lo;
+ __le32 hi;
+};
+
+/*
+ * Debug Bus PCI buffer data
+ */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
+ struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
+ __le32 size /* PCI buffer size in bytes */;
+};
+
+/*
+ * Debug Bus Storm EID range filter params
+ */
+struct dbg_bus_storm_eid_range_params {
+ u8 min /* Minimal event ID to filter on */;
+ u8 max /* Maximal event ID to filter on */;
+};
+
+/*
+ * Debug Bus Storm EID mask filter params
+ */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val /* Event ID value */;
+ u8 mask /* Event ID mask. 1s in the mask = dont care bits. */;
+};
+
+/*
+ * Debug Bus Storm EID filter params
+ */
+union dbg_bus_storm_eid_params {
+/* EID range filter params */
+ struct dbg_bus_storm_eid_range_params range;
+/* EID mask filter params */
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/*
+ * Debug Bus Storm data
+ */
+struct dbg_bus_storm_data {
+/* Indicates if the Storm is enabled for fast debug recording (0/1) */
+ u8 fast_enabled;
+/* Fast debug Storm mode, valid only if fast_enabled is set */
+ u8 fast_mode;
+/* Indicates if the Storm is enabled for slow debug recording (0/1) */
+ u8 slow_enabled;
+/* Slow debug Storm mode, valid only if slow_enabled is set */
+ u8 slow_mode;
+ u8 hw_id /* HW ID associated with the Storm */;
+ u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
+/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
+ * set.
+ */
+ u8 eid_range_not_mask;
+ u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
+/* EID filter params to filter on. Valid only if eid_filter_en is set. */
+ union dbg_bus_storm_eid_params eid_filter_params;
+ __le16 reserved;
+/* CID to filter on. Valid only if cid_filter_en is set. */
+ __le32 cid;
+};
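
As a hedged example of how these fields relate to one another, here is a
sketch only (not part of this patch); the assignments follow the field
comments above, and host-order stores into the little-endian fields are
glossed over:

/* Configure a Storm for fast recording, filtering on event IDs 0x10..0x1f. */
static void setup_storm_eid_range(struct dbg_bus_storm_data *storm)
{
	storm->fast_enabled = 1;
	storm->eid_filter_en = 1;
	storm->eid_range_not_mask = 1;        /* 1 = range filter (not mask filter) */
	storm->eid_filter_params.range.min = 0x10;
	storm->eid_filter_params.range.max = 0x1f;
	storm->cid_filter_en = 0;             /* no CID filtering */
}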
+
+/*
+ * Debug Bus data
+ */
+struct dbg_bus_data {
+ __le32 app_version /* The tools version number of the application */;
+ u8 state /* The current debug bus state */;
+ u8 hw_dwords /* HW dwords per cycle */;
+ u8 next_hw_id /* Next HW ID to be associated with an input */;
+ u8 num_enabled_blocks /* Number of blocks enabled for recording */;
+ u8 num_enabled_storms /* Number of Storms enabled for recording */;
+ u8 target /* Output target */;
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+/* ID of next filter/trigger constraint to be added */
+ u8 next_constraint_id;
+ u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
+ u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
+/* Indicates if timestamp recording is enabled (0/1) */
+ u8 timestamp_input_en;
+ u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
+/* Indicates if the recording trigger is enabled (0/1) */
+ u8 trigger_en;
+/* If true, the next added constraint belongs to the filter. Otherwise,
+ * it belongs to the last added trigger state. Valid only if either filter or
+ * triggers are enabled.
+ */
+ u8 adding_filter;
+/* Indicates if the recording filter should be applied before the trigger.
+ * Valid only if both filter and trigger are enabled (0/1)
+ */
+ u8 filter_pre_trigger;
+/* Indicates if the recording filter should be applied after the trigger.
+ * Valid only if both filter and trigger are enabled (0/1)
+ */
+ u8 filter_post_trigger;
+/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
+ * assigned a different HW ID (0/1)
+ */
+ u8 unify_inputs;
+/* Indicates if the other engine sends its NW recording to this engine (0/1) */
+ u8 rcv_from_other_engine;
+/* Debug Bus PCI buffer data. Valid only when the target is
+ * DBG_BUS_TARGET_ID_PCI.
+ */
+ struct dbg_bus_pci_buf_data pci_buf;
+ __le16 reserved;
+/* Debug Bus data for each block */
+ struct dbg_bus_block_data blocks[80];
+/* Debug Bus data for each Storm */
+ struct dbg_bus_storm_data storms[6];
+};
+
+
+/*
+ * Debug bus filter types
+ */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
+ DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
+ DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
+ DBG_BUS_FILTER_TYPE_ON /* filter always on */,
+ MAX_DBG_BUS_FILTER_TYPES
+};
+
+
+/*
+ * Debug bus frame modes
+ */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
+ DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
+ DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
+ MAX_DBG_BUS_FRAME_MODES
+};
+
+
+/*
+ * Debug bus input types
+ */
+enum dbg_bus_input_types {
+ DBG_BUS_INPUT_TYPE_STORM,
+ DBG_BUS_INPUT_TYPE_BLOCK,
+ MAX_DBG_BUS_INPUT_TYPES
+};
+
+
+
+/*
+ * Debug bus other engine mode
+ */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
+ MAX_DBG_BUS_OTHER_ENGINE_MODES
+};
+
+
+
+/*
+ * Debug bus post-trigger recording types
+ */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
+ DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
+ MAX_DBG_BUS_POST_TRIGGER_TYPES
+};
+
+
+/*
+ * Debug bus pre-trigger recording types
+ */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
+/* start recording some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS,
+ DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
+ MAX_DBG_BUS_PRE_TRIGGER_TYPES
+};
+
+
+/*
+ * Debug bus SEMI frame modes
+ */
+enum dbg_bus_semi_frame_modes {
+/* 0 slow dwords, 4 fast dwords */
+ DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = 0,
+/* 4 slow dwords, 0 fast dwords */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = 3,
+ MAX_DBG_BUS_SEMI_FRAME_MODES
+};
+
+
+/*
+ * Debug bus states
+ */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE /* debug bus idle state (not recording) */,
+/* debug bus is ready for configuration and recording */
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING /* debug bus is currently recording */,
+ DBG_BUS_STATE_STOPPED /* debug bus recording has stopped */,
+ MAX_DBG_BUS_STATES
+};
+
+
+
+
+
+
+/*
+ * Debug Bus Storm modes
+ */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */,
+ DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
+ DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
+ DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
+ DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
+ DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+
+/*
+ * Debug bus target IDs
+ */
+enum dbg_bus_targets {
+/* records debug bus to DBG block internal buffer */
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
+ DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
+ MAX_DBG_BUS_TARGETS
+};
+
+
+/*
+ * GRC Dump data
+ */
+struct dbg_grc_data {
+/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
+ __le32 param_val[40];
+/* Indicates for each GRC parameter if it was set by the user (0/1).
+ * Array size must match the enum dbg_grc_params.
+ */
+ u8 param_set_by_user[40];
+};
+
+
+/*
+ * Debug GRC params
+ */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */,
+ DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */,
+ DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */,
+ DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */,
+ DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */,
+ DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
+ DBG_GRC_PARAM_RESERVED /* reserved */,
+ DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DMAE /* dump DMAE memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_DIF /* dump DIF memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
+ DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
+ DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
+ DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
+/* preset: exclude all memories from dump (1 only) */
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+/* preset: include memories for crash dump (1 only) */
+ DBG_GRC_PARAM_CRASH,
+/* perform dump only if MFW is responding (0/1) */
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
+ DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+ MAX_DBG_GRC_PARAMS
+};
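
A minimal sketch of how a GRC dump parameter might be overridden through the
dbg_grc_data structure and dbg_grc_params enum above (illustrative only, not
part of this patch; the host-order store into the __le32 array is glossed
over):

#include <stdint.h>

static void grc_param_set(struct dbg_grc_data *grc,
			  enum dbg_grc_params param, uint32_t val)
{
	/* record both the value and the fact that the user set it */
	grc->param_val[param] = val;
	grc->param_set_by_user[param] = 1;
}

/* e.g. exclude Tstorm memories from the dump:
 *	grc_param_set(&dev_data.grc, DBG_GRC_PARAM_DUMP_TSTORM, 0);
 */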
+
+
+/*
+ * Debug reset registers
+ */
+enum dbg_reset_regs {
+ DBG_RESET_REG_MISCS_PL_UA,
+ DBG_RESET_REG_MISCS_PL_HV,
+ DBG_RESET_REG_MISCS_PL_HV_2,
+ DBG_RESET_REG_MISC_PL_UA,
+ DBG_RESET_REG_MISC_PL_HV,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
+ DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
+ DBG_RESET_REG_MISC_PL_PDA_VAUX,
+ MAX_DBG_RESET_REGS
+};
+
+
+/*
+ * Debug status codes
+ */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ MAX_DBG_STATUS
+};
+
+
+/*
+ * Debug Storms IDs
+ */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+
+/*
+ * Idle Check data
+ */
+struct idle_chk_data {
+ __le32 buf_size /* Idle check buffer size in dwords */;
+/* Indicates if the idle check buffer size was set (0/1) */
+ u8 buf_size_set;
+ u8 reserved1;
+ __le16 reserved2;
+};
+
+/*
+ * Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+ struct dbg_grc_data grc /* GRC Dump data */;
+ struct dbg_bus_data bus /* Debug Bus data */;
+ struct idle_chk_data idle_chk /* Idle Check data */;
+ u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
+/* Indicates if a block is in reset state (0/1) */
+ u8 block_in_reset[80];
+ u8 chip_id /* Chip ID (from enum chip_ids) */;
+ u8 platform_id /* Platform ID (from enum platform_ids) */;
+ u8 initialized /* Indicates if the data was initialized */;
+ u8 reserved;
+};
+
+
+#endif /* __ECORE_HSI_DEBUG_TOOLS__ */
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 80f4165f..e26c1833 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -38,219 +38,315 @@ struct xstorm_eth_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
+/* bit8 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
+/* timer0cf */
#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
+/* cf4 */
#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+/* cf5 */
#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+/* cf6 */
#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+/* cf7 */
#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
+/* cf8 */
#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+/* cf9 */
#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
+/* cf12 */
#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
+/* cf16 */
#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
+/* cf20 */
#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
+/* cf2en */
#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
+/* cf10en */
#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
+/* cf18en */
#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
+/* rule2en */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
+/* rule10en */
#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
+/* rule18en */
#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
+/* bit16 */
#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
+ __le16 quota /* physical_q1 */;
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
+ __le16 tx_class /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
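
The *_MASK/*_SHIFT pairs above (and in the ystorm/tstorm/ustorm/mstorm contexts that follow) describe packed bitfields inside the per-storm flags bytes. As an illustrative sketch only - the helper names below are not taken from this patch, and the driver base code provides its own equivalents - a field can be read or written like this, assuming the header definitions above are in scope:

#include <stdint.h>

/* Illustrative helpers; real equivalents live in the driver base code. */
#define EX_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define EX_SET_FIELD(value, name, field) \
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) | \
		   (((field) & name##_MASK) << name##_SHIFT))

static void example_flush_q0(uint8_t *flags7)
{
	/* Program the flush-Q0 credit flow (cf20, a 2-bit field) ... */
	EX_SET_FIELD(*flags7, XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0, 1);
	/* ... and read the field back. */
	uint8_t q0 = EX_GET_FIELD(*flags7, XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0);
	(void)q0;
}
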
@@ -306,36 +402,46 @@ struct ystorm_eth_conn_st_ctx {
struct ystorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
+ u8 state /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+/* exist_in_qm1 */
#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
+/* cf0en */
#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+/* cf1en */
#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+/* cf2en */
#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+/* rule0en */
#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+/* rule1en */
#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+/* rule2en */
#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+/* rule3en */
#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+/* rule4en */
#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2 /* byte2 */;
+ u8 tx_q0_int_coallecing_timeset /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
__le32 terminate_spqe /* reg0 */;
@@ -352,84 +458,84 @@ struct tstorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
@@ -456,57 +562,82 @@ struct ustorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+/* exist_in_qm1 */
#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+/* timer0cf */
#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+/* timer1cf */
#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+/* timer2cf */
#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
+/* timer_stop_all */
#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+/* cf4 */
#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+/* cf5 */
#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+/* cf6 */
#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
+/* cf0en */
#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+/* cf1en */
#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+/* cf2en */
#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+/* cf3en */
#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+/* cf4en */
#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+/* cf5en */
#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+/* cf6en */
#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+/* rule0en */
#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
+/* rule1en */
#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+/* rule2en */
#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+/* rule3en */
#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+/* rule4en */
#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+/* rule5en */
#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+/* rule6en */
#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+/* rule7en */
#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+/* rule8en */
#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
@@ -539,83 +670,79 @@ struct mstorm_eth_conn_st_ctx {
* eth connection context
*/
struct eth_conn_context {
- struct tstorm_eth_conn_st_ctx tstorm_st_context
- /* tstorm storm context */;
+/* tstorm storm context */
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2] /* padding */;
- struct pstorm_eth_conn_st_ctx pstorm_st_context
- /* pstorm storm context */;
- struct xstorm_eth_conn_st_ctx xstorm_st_context
- /* xstorm storm context */;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context
- /* xstorm aggregative context */;
- struct ystorm_eth_conn_st_ctx ystorm_st_context
- /* ystorm storm context */;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context
- /* ystorm aggregative context */;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context
- /* tstorm aggregative context */;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context
- /* ustorm aggregative context */;
- struct ustorm_eth_conn_st_ctx ustorm_st_context
- /* ustorm storm context */;
- struct mstorm_eth_conn_st_ctx mstorm_st_context
- /* mstorm storm context */;
+/* pstorm storm context */
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+/* xstorm storm context */
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+/* xstorm aggregative context */
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+/* ystorm storm context */
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+/* ystorm aggregative context */
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+/* tstorm aggregative context */
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+/* ustorm aggregative context */
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+/* ustorm storm context */
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+/* mstorm storm context */
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
+
/*
* Ethernet filter types: mac/vlan/pair
*/
enum eth_error_code {
ETH_OK = 0x00 /* command succeeded */,
- ETH_FILTERS_MAC_ADD_FAIL_FULL
- /* mac add filters command failed due to cam full state */,
- ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2
- /* mac add filters command failed due to mtt2 full state */,
- ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2
- /* mac add filters command failed due to duplicate mac address */,
- ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2
- /* mac add filters command failed due to duplicate mac address */,
- ETH_FILTERS_MAC_DEL_FAIL_NOF
- /* mac delete filters command failed due to not found state */,
- ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2
- /* mac delete filters command failed due to not found state */,
- ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2
- /* mac delete filters command failed due to not found state */,
- ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC
- /* mac add filters command failed due to MAC Address of
- * 00:00:00:00:00:00
- */
- ,
- ETH_FILTERS_VLAN_ADD_FAIL_FULL
- /* vlan add filters command failed due to cam full state */,
- ETH_FILTERS_VLAN_ADD_FAIL_DUP
- /* vlan add filters command failed due to duplicate VLAN filter */,
- ETH_FILTERS_VLAN_DEL_FAIL_NOF
- /* vlan delete filters command failed due to not found state */,
- ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1
- /* vlan delete filters command failed due to not found state */,
- ETH_FILTERS_PAIR_ADD_FAIL_DUP
- /* pair add filters command failed due to duplicate request */,
- ETH_FILTERS_PAIR_ADD_FAIL_FULL
- /* pair add filters command failed due to full state */,
- ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC
- /* pair add filters command failed due to full state */,
- ETH_FILTERS_PAIR_DEL_FAIL_NOF
- /* pair add filters command failed due not found state */,
- ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1
- /* pair add filters command failed due not found state */,
- ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC
- /* pair add filters command failed due to MAC Address of
- * 00:00:00:00:00:00
- */
- ,
- ETH_FILTERS_VNI_ADD_FAIL_FULL
- /* vni add filters command failed due to cam full state */,
- ETH_FILTERS_VNI_ADD_FAIL_DUP
- /* vni add filters command failed due to duplicate VNI filter */,
+/* mac add filters command failed due to cam full state */
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+/* mac add filters command failed due to mtt2 full state */
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+/* mac add filters command failed due to duplicate mac address */
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+/* mac add filters command failed due to duplicate mac address */
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+/* mac delete filters command failed due to not found state */
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+/* mac add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+/* vlan add filters command failed due to cam full state */
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+/* vlan add filters command failed due to duplicate VLAN filter */
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+/* vlan delete filters command failed due to not found state */
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+/* vlan delete filters command failed due to not found state */
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+/* pair add filters command failed due to duplicate request */
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+/* pair add filters command failed due to full state */
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+/* pair add filters command failed due to full state */
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+/* pair delete filters command failed due to not found state */
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+/* pair delete filters command failed due to not found state */
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+/* pair add filters command failed due to MAC Address of 00:00:00:00:00:00 */
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+/* vni add filters command failed due to cam full state */
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+/* vni add filters command failed due to duplicate VNI filter */
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
MAX_ETH_ERROR_CODE
};
+
/*
* opcodes for the event ring
*/
@@ -635,12 +762,12 @@ enum eth_event_opcode {
ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
ETH_EVENT_RX_ADD_UDP_FILTER,
ETH_EVENT_RX_DELETE_UDP_FILTER,
- ETH_EVENT_RX_ADD_GFT_FILTER,
- ETH_EVENT_RX_DELETE_GFT_FILTER,
ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
MAX_ETH_EVENT_OPCODE
};
+
/*
* Classify rule types in E2/E3
*/
@@ -648,11 +775,12 @@ enum eth_filter_action {
ETH_FILTER_ACTION_UNUSED,
ETH_FILTER_ACTION_REMOVE,
ETH_FILTER_ACTION_ADD,
- ETH_FILTER_ACTION_REMOVE_ALL
- /* Remove all filters of given type and vport ID. */,
+/* Remove all filters of given type and vport ID. */
+ ETH_FILTER_ACTION_REMOVE_ALL,
MAX_ETH_FILTER_ACTION
};
+
/*
* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$
*/
@@ -668,6 +796,7 @@ struct eth_filter_cmd {
__le16 vlan_id;
};
+
/*
* $$KEEP_ENDIANNESS$$
*/
@@ -675,10 +804,14 @@ struct eth_filter_cmd_header {
u8 rx /* If set, apply these commands to the RX path */;
u8 tx /* If set, apply these commands to the TX path */;
u8 cmd_cnt /* Number of filter commands */;
+/* 0 - don't assert in case of a filter configuration error; just return an
+ * error code. 1 - assert in case of a filter configuration error.
+ */
u8 assert_on_error;
u8 reserved1[4];
};
+
/*
* Ethernet filter types: mac/vlan/pair
*/
@@ -690,25 +823,27 @@ enum eth_filter_type {
ETH_FILTER_TYPE_INNER_MAC /* Add/remove a inner MAC address */,
ETH_FILTER_TYPE_INNER_VLAN /* Add/remove a inner VLAN */,
ETH_FILTER_TYPE_INNER_PAIR /* Add/remove a inner MAC-VLAN pair */,
- ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR /* Add/remove a inner MAC-VNI pair */
- ,
+/* Add/remove a inner MAC-VNI pair */
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */,
ETH_FILTER_TYPE_VNI /* Add/remove a VNI */,
MAX_ETH_FILTER_TYPE
};
+
/*
* eth IPv4 Fragment Type
*/
enum eth_ipv4_frag_type {
ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */,
- ETH_IPV4_FIRST_FRAG
- /* First Fragment of IPv4 Packet (contains headers) */,
- ETH_IPV4_NON_FIRST_FRAG
- /* Non-First Fragment of IPv4 Packet (does not contain headers) */,
+/* First Fragment of IPv4 Packet (contains headers) */
+ ETH_IPV4_FIRST_FRAG,
+/* Non-First Fragment of IPv4 Packet (does not contain headers) */
+ ETH_IPV4_NON_FIRST_FRAG,
MAX_ETH_IPV4_FRAG_TYPE
};
+
/*
* eth IPv4 Fragment Type
*/
@@ -718,6 +853,7 @@ enum eth_ip_type {
MAX_ETH_IP_TYPE
};
+
/*
* Ethernet Ramrod Command IDs
*/
@@ -732,91 +868,117 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
- ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION
- /* RX - Create an Openflow Action */,
- ETH_RAMROD_RX_ADD_OPENFLOW_FILTER
- /* RX - Add an Openflow Filter to the Searcher */,
- ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER
- /* RX - Delete an Openflow Filter to the Searcher */,
- ETH_RAMROD_RX_ADD_UDP_FILTER /* RX - Add a UDP Filter to the Searcher */
- ,
- ETH_RAMROD_RX_DELETE_UDP_FILTER
- /* RX - Delete a UDP Filter to the Searcher */,
- ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create an Gft Action */,
- ETH_RAMROD_RX_DELETE_GFT_FILTER
- /* RX - Delete an GFT Filter to the Searcher */,
- ETH_RAMROD_RX_ADD_GFT_FILTER
- /* RX - Add an GFT Filter to the Searcher */,
+/* RX - Create an Openflow Action */
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+/* RX - Add an Openflow Filter to the Searcher */
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+/* RX - Delete an Openflow Filter to the Searcher */
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+/* RX - Add a UDP Filter to the Searcher */
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+/* RX - Delete a UDP Filter to the Searcher */
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
+/* RX - Add/Delete a GFT Filter to the Searcher */
+ ETH_RAMROD_GFT_UPDATE_FILTER,
MAX_ETH_RAMROD_CMD_ID
};
+
/*
* return code from eth sp ramrods
*/
struct eth_return_code {
u8 value;
+/* error code (use enum eth_error_code) */
#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
#define ETH_RETURN_CODE_RESERVED_MASK 0x3
#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+/* rx path - 0, tx path - 1 */
#define ETH_RETURN_CODE_RX_TX_MASK 0x1
#define ETH_RETURN_CODE_RX_TX_SHIFT 7
};
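
The return code packs an error code (enum eth_error_code), a reserved field and an RX/TX direction bit into one byte. A minimal decoding sketch, assuming the masks/shifts above are available; the function name is illustrative:

#include <stdint.h>
#include <stdio.h>

static void decode_eth_return_code(uint8_t value)
{
	uint8_t err = (value >> ETH_RETURN_CODE_ERR_CODE_SHIFT) &
		      ETH_RETURN_CODE_ERR_CODE_MASK;  /* enum eth_error_code */
	uint8_t tx = (value >> ETH_RETURN_CODE_RX_TX_SHIFT) &
		     ETH_RETURN_CODE_RX_TX_MASK;      /* rx path - 0, tx path - 1 */

	if (err != ETH_OK)
		printf("eth ramrod failed on %s path, error code %u\n",
		       tx ? "tx" : "rx", err);
}
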
+
/*
* What to do in case an error occurs
*/
enum eth_tx_err {
ETH_TX_ERR_DROP /* Drop erroneous packet. */,
- ETH_TX_ERR_ASSERT_MALICIOUS
- /* Assert an interrupt for PF, declare as malicious for VF */,
+/* Assert an interrupt for PF, declare as malicious for VF */
+ ETH_TX_ERR_ASSERT_MALICIOUS,
MAX_ETH_TX_ERR
};
+
/*
* Array of the different error type behaviors
*/
struct eth_tx_err_vals {
__le16 values;
+/* Wrong VLAN insertion mode (use enum eth_tx_err) */
#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+/* Packet is below minimal size (use enum eth_tx_err) */
#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+/* Vport has sent spoofed packet (use enum eth_tx_err) */
#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+/* Packet with illegal type of inband tag (use enum eth_tx_err) */
#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+/* Packet marked for VLAN insertion when inband tag is present
+ * (use enum eth_tx_err)
+ */
#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+/* Non LSO packet larger than MTU (use enum eth_tx_err) */
#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+/* VF/PF has sent LLDP/PFC or any other type of control packet which is not
+ * allowed to (use enum eth_tx_err)
+ */
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
};
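
Each bit of eth_tx_err_vals selects, per error type, one of the two eth_tx_err behaviors. A hedged sketch of requesting "drop" for MTU violations and "assert/malicious" for anti-spoofing errors; conversion of the result to __le16 before writing it into eth_tx_err_vals.values is assumed to happen elsewhere:

#include <stdint.h>

static uint16_t build_tx_err_vals(void)
{
	uint16_t vals = 0;

	/* Drop oversized non-LSO packets (ETH_TX_ERR_DROP is value 0). */
	vals |= (uint16_t)ETH_TX_ERR_DROP <<
		ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT;
	/* Assert / mark as malicious on spoofed packets. */
	vals |= (uint16_t)ETH_TX_ERR_ASSERT_MALICIOUS <<
		ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT;

	return vals;
}
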
+
/*
* vport rss configuration data
*/
struct eth_vport_rss_config {
__le16 capabilities;
+/* configuration of the IpV4 2-tuple capability */
#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+/* configuration of the IpV6 2-tuple capability */
#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+/* configuration of the IpV4 4-tuple capability for TCP */
#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+/* configuration of the IpV6 4-tuple capability for TCP */
#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+/* configuration of the IpV4 4-tuple capability for UDP */
#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+/* configuration of the IpV6 4-tuple capability for UDP */
#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+/* configuration of the 5-tuple capability */
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+/* if set update the rss keys */
#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+/* The RSS engine ID. Must be allocated to each vport with RSS enabled.
+ * Total number of RSS engines is ETH_RSS_ENGINE_NUM_ , according to chip type.
+ */
u8 rss_id;
u8 rss_mode /* The RSS mode for this function */;
u8 update_rss_key /* if set update the rss key */;
@@ -824,13 +986,14 @@ struct eth_vport_rss_config {
u8 update_rss_capabilities /* if set update the capabilities */;
u8 tbl_size /* rss mask (Tbl size) */;
__le32 reserved2[2];
- __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]
- /* RSS indirection table */;
- __le32 rss_key[ETH_RSS_KEY_SIZE_REGS] /* RSS key supplied to us by OS */
- ;
+/* RSS indirection table */
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+/* RSS key supplied to us by OS */
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
__le32 reserved3[2];
};
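
A hedged sketch of enabling IPv4/IPv6 TCP 4-tuple hashing for a vport. Endianness conversion, the RSS key and indirection-table contents, and the ETH_VPORT_RSS_MODE_REGULAR enumerator (a member of the eth_vport_rss_mode enum below is assumed) are not shown here:

#include <stdint.h>

static void fill_rss_caps(struct eth_vport_rss_config *cfg, uint8_t rss_id)
{
	uint16_t caps = 0;

	caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT;
	caps |= 1 << ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT;

	cfg->capabilities = caps;           /* convert to __le16 as needed */
	cfg->rss_id = rss_id;               /* RSS engine allocated to this vport */
	cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR; /* assumed enumerator */
	cfg->update_rss_capabilities = 1;
	cfg->tbl_size = 7;                  /* example indirection-table mask */
}
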
+
/*
* eth vport RSS mode
*/
@@ -840,21 +1003,28 @@ enum eth_vport_rss_mode {
MAX_ETH_VPORT_RSS_MODE
};
+
/*
* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
*/
struct eth_vport_rx_mode {
__le16 state;
+/* drop all unicast packets */
#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+/* accept all unicast packets (subject to vlan) */
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+/* accept all unmatched unicast packets */
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+/* drop all multicast packets */
#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+/* accept all multicast packets (subject to vlan) */
#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+/* accept all broadcast packets (subject to vlan) */
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
@@ -862,6 +1032,7 @@ struct eth_vport_rx_mode {
__le16 reserved2[3];
};
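
A small sketch of composing a promiscuous rx-mode state word (accept all unicast, unmatched unicast, multicast and broadcast); conversion to __le16 is left to the caller. The function name is illustrative:

#include <stdint.h>

static uint16_t rx_mode_promisc_state(void)
{
	uint16_t state = 0;

	state |= 1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT;
	state |= 1 << ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT;
	state |= 1 << ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT;
	state |= 1 << ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT;

	return state;
}
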
+
/*
* Command for setting tpa parameters
*/
@@ -870,39 +1041,45 @@ struct eth_vport_tpa_param {
u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */;
u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */;
u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */;
+/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment
+ * allowed
+ */
u8 tpa_pkt_split_flg;
- u8 tpa_hdr_data_split_flg
- /* If set, put header of first TPA segment on bd and data on SGE */
- ;
- u8 tpa_gro_consistent_flg
- /* If set, GRO data consistent will checked for TPA continue */;
- u8 tpa_max_aggs_num
- /* maximum number of opened aggregations per v-port */;
+/* If set, put header of first TPA segment on bd and data on SGE */
+ u8 tpa_hdr_data_split_flg;
+/* If set, GRO data consistency will be checked for TPA continue */
+ u8 tpa_gro_consistent_flg;
+/* maximum number of opened aggregations per v-port */
+ u8 tpa_max_aggs_num;
__le16 tpa_max_size /* maximal size for the aggregated TPA packets */;
- __le16 tpa_min_size_to_start
- /* minimum TCP payload size for a packet to start aggregation */;
- __le16 tpa_min_size_to_cont
- /* minimum TCP payload size for a packet to continue aggregation */
- ;
- u8 max_buff_num
- /* maximal number of buffers that can be used for one aggregation */
- ;
+/* minimum TCP payload size for a packet to start aggregation */
+ __le16 tpa_min_size_to_start;
+/* minimum TCP payload size for a packet to continue aggregation */
+ __le16 tpa_min_size_to_cont;
+/* maximal number of buffers that can be used for one aggregation */
+ u8 max_buff_num;
u8 reserved;
};
+
/*
* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$
*/
struct eth_vport_tx_mode {
__le16 state;
+/* drop all unicast packets */
#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+/* accept all unicast packets (subject to vlan) */
#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+/* drop all multicast packets */
#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+/* accept all multicast packets (subject to vlan) */
#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+/* accept all broadcast packets (subject to vlan) */
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
@@ -910,17 +1087,29 @@ struct eth_vport_tx_mode {
__le16 reserved2[3];
};
+
/*
- * Ramrod data for rx add gft filter data
+ * GFT filter update action
*/
-struct rx_add_gft_filter_data {
- struct regpair pkt_hdr_addr /* Packet Header That Defines GFT Filter */
- ;
- __le16 action_icid /* ICID of Action to run for this filter */;
- __le16 pkt_hdr_length /* Packet Header Length */;
- u8 reserved[4];
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+
+/*
+ * GFT logic filter type
+ */
+enum gft_logic_filter_type {
+ GFT_FILTER_TYPE /* flow FW is GFT-logic as well */,
+ RFS_FILTER_TYPE /* flow FW is A-RFS-logic */,
+ MAX_GFT_LOGIC_FILTER_TYPE
};
+
+
+
/*
* Ramrod data for rx add openflow filter
*/
@@ -929,10 +1118,12 @@ struct rx_add_openflow_filter_data {
u8 priority /* Searcher String - Packet priority */;
u8 reserved0;
__le32 tenant_id /* Searcher String - Tenant ID */;
- __le16 dst_mac_hi /* Searcher String - Destination Mac Bytes 0 to 1 */;
- __le16 dst_mac_mid /* Searcher String - Destination Mac Bytes 2 to 3 */
- ;
- __le16 dst_mac_lo /* Searcher String - Destination Mac Bytes 4 to 5 */;
+/* Searcher String - Destination Mac Bytes 0 to 1 */
+ __le16 dst_mac_hi;
+/* Searcher String - Destination Mac Bytes 2 to 3 */
+ __le16 dst_mac_mid;
+/* Searcher String - Destination Mac Bytes 4 to 5 */
+ __le16 dst_mac_lo;
__le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */;
__le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */;
__le16 src_mac_lo /* Searcher String - Source Mac 4 to 5 */;
@@ -948,6 +1139,7 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port /* Searcher String - TCP/UDP Source Port */;
};
+
/*
* Ramrod data for rx create gft action
*/
@@ -956,6 +1148,7 @@ struct rx_create_gft_action_data {
u8 reserved[7];
};
+
/*
* Ramrod data for rx create openflow action
*/
@@ -964,6 +1157,7 @@ struct rx_create_openflow_action_data {
u8 reserved[7];
};
+
/*
* Ramrod data for rx queue start ramrod
*/
@@ -980,25 +1174,35 @@ struct rx_queue_start_ramrod_data {
u8 stats_counter_id /* Statistics counter ID */;
u8 pin_context /* Pin context in CCFC to improve performance */;
u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */;
- u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet placement */
- ;
- u8 pxp_st_hint
- /* PXP command Steering tag hint. Use enum pxp_tph_st_hint */;
+/* PXP command TPH Valid - for packet placement */
+ u8 pxp_tph_valid_pkt;
+/* PXP command Steering tag hint. Use enum pxp_tph_st_hint */
+ u8 pxp_st_hint;
__le16 pxp_st_index /* PXP command Steering tag index */;
- u8 pmd_mode
- /* Indicates that current queue belongs to poll-mode driver */;
+/* Indicates that current queue belongs to poll-mode driver */
+ u8 pmd_mode;
+/* Indicates that the current queue is using the TX notification queue
+ * mechanism - should be set only for PMD queue
+ */
u8 notify_en;
- u8 toggle_val
- /* Initial value for the toggle valid bit - used in PMD mode */;
- u8 reserved[7];
+/* Initial value for the toggle valid bit - used in PMD mode */
+ u8 toggle_val;
+/* Index of RX producers in VF zone. Used for VF only. */
+ u8 vf_rx_prod_index;
+/* Backward compatibility mode. If set, unprotected mStorm queue zone will be
+ * used for VF RX producers instead of VF zone.
+ */
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
__le16 reserved1 /* FW reserved. */;
struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */;
struct regpair bd_base /* bd address of the first bd page */;
struct regpair reserved2 /* FW reserved. */;
};
+
/*
- * Ramrod data for rx queue start ramrod
+ * Ramrod data for rx queue stop ramrod
*/
struct rx_queue_stop_ramrod_data {
__le16 rx_queue_id /* ID of RX queue */;
@@ -1008,6 +1212,7 @@ struct rx_queue_stop_ramrod_data {
u8 reserved[3];
};
+
/*
* Ramrod data for rx queue update ramrod
*/
@@ -1025,6 +1230,7 @@ struct rx_queue_update_ramrod_data {
struct regpair reserved6 /* FW reserved. */;
};
+
/*
* Ramrod data for rx Add UDP Filter
*/
@@ -1034,38 +1240,69 @@ struct rx_udp_filter_data {
u8 ip_type /* Searcher String - IP Type */;
u8 tenant_id_exists /* Searcher String - Tenant ID Exists */;
__le16 reserved1;
+/* Searcher String - IP Destination Address, for IPv4 use ip_dst_addr[0] only */
__le32 ip_dst_addr[4];
- /* Searcher String-IP Dest Addr for IPv4 use ip_dst_addr[0] only */
- ;
- __le32 ip_src_addr[4]
- /* Searcher String-IP Src Addr, for IPv4 use ip_dst_addr[0] only */
- ;
+/* Searcher String - IP Source Address, for IPv4 use ip_src_addr[0] only */
+ __le32 ip_src_addr[4];
__le16 udp_dst_port /* Searcher String - UDP Destination Port */;
__le16 udp_src_port /* Searcher String - UDP Source Port */;
__le32 tenant_id /* Searcher String - Tenant ID */;
};
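
A hedged example of filling an IPv4 UDP filter; as the comments above note, IPv4 addresses occupy element [0] of the address arrays. ETH_IPV4 is assumed to be a member of enum eth_ip_type, and values are left in host order with __le conversions assumed to happen before the ramrod is posted:

#include <stdint.h>
#include <string.h>

static void fill_udp_filter(struct rx_udp_filter_data *f,
			    uint32_t dst_ip, uint32_t src_ip,
			    uint16_t dst_port, uint16_t src_port)
{
	memset(f, 0, sizeof(*f));
	f->ip_type = ETH_IPV4;         /* assumed member of enum eth_ip_type */
	f->ip_dst_addr[0] = dst_ip;    /* IPv4 uses element [0] only */
	f->ip_src_addr[0] = src_ip;
	f->udp_dst_port = dst_port;
	f->udp_src_port = src_port;
}
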
+
/*
- * Ramrod data for rx queue start ramrod
+ * Ramrod to add a filter - the filter is a packet header of the packet type
+ * that should pass a certain FW flow
+ */
+struct rx_update_gft_filter_data {
+/* Pointer to Packet Header That Defines GFT Filter */
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length /* Packet Header Length */;
+/* If is_rfs flag is set: Queue Id to associate filter with else: action icid */
+ __le16 rx_qid_or_action_icid;
+/* Used if the is_rfs flag is set: vport ID with which to associate the
+ * filter
+ */
+ u8 vport_id;
+/* Use enum to set type of flow using gft HW logic blocks */
+ u8 filter_type;
+ u8 filter_action /* Use to set type of action on filter */;
+ u8 reserved;
+};
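
A hedged sketch of preparing this ramrod data for adding an RFS filter that steers matching packets to a given RX queue. The regpair hi/lo layout, the DMA mapping of the packet-header template, and the endianness conversions are assumptions handled elsewhere:

#include <stdint.h>
#include <string.h>

static void prep_gft_add_filter(struct rx_update_gft_filter_data *d,
				uint64_t hdr_dma, uint16_t hdr_len,
				uint16_t rx_qid, uint8_t vport_id)
{
	memset(d, 0, sizeof(*d));
	d->pkt_hdr_addr.hi = (uint32_t)(hdr_dma >> 32); /* regpair layout assumed */
	d->pkt_hdr_addr.lo = (uint32_t)hdr_dma;
	d->pkt_hdr_length = hdr_len;
	d->rx_qid_or_action_icid = rx_qid;  /* queue id, since this is an RFS filter */
	d->vport_id = vport_id;
	d->filter_type = RFS_FILTER_TYPE;   /* enum gft_logic_filter_type */
	d->filter_action = GFT_ADD_FILTER;  /* enum gft_filter_update_action */
}
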
+
+
+
+/*
+ * Ramrod data for tx queue start ramrod
*/
struct tx_queue_start_ramrod_data {
__le16 sb_id /* Status block ID */;
u8 sb_index /* Status block protocol index */;
u8 vport_id /* VPort ID */;
- u8 reserved0 /* FW reserved. */;
+ u8 reserved0 /* FW reserved. (qcn_rl_en) */;
u8 stats_counter_id /* Statistics counter ID to use */;
__le16 qm_pq_id /* QM PQ ID */;
u8 flags;
+/* 0: Enable QM opportunistic flow. 1: Disable QM opportunistic flow */
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+/* If set, Test Mode - packets will be duplicated by Xstorm handler */
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+/* If set, Test Mode - packets destination will be determined by dest_port_mode
+ * field from Tx BD
+ */
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+/* Indicates that current queue belongs to poll-mode driver */
#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+/* Indicates that the current queue is using the TX notification queue
+ * mechanism - should be set only for PMD queue
+ */
#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+/* Pin context in CCFC to improve performance */
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
@@ -1074,17 +1311,25 @@ struct tx_queue_start_ramrod_data {
u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */;
u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */;
__le16 pxp_st_index /* PXP command Steering tag index */;
- __le16 comp_agg_size /* TX completion min agg size - for PMD queues */;
+/* TX completion min agg size - for PMD queues */
+ __le16 comp_agg_size;
__le16 queue_zone_id /* queue zone ID to use */;
- __le16 test_dup_count /* In Test Mode, number of duplications */;
+ __le16 reserved2 /* FW reserved. (test_dup_count) */;
__le16 pbl_size /* Number of BD pages pointed by PBL */;
- __le16 tx_queue_id
- /* unique Queue ID - currently used only by PMD flow */;
+/* unique Queue ID - currently used only by PMD flow */
+ __le16 tx_queue_id;
+/* Unique Same-As-Last Resource ID - improves performance for same-as-last
+ * packets per connection (range 0..ETH_TX_NUM_SAME_AS_LAST_ENTRIES-1 IDs
+ * available)
+ */
+ __le16 same_as_last_id;
+ __le16 reserved[3];
struct regpair pbl_base_addr /* address of the pbl page */;
- struct regpair bd_cons_address
- /* BD consumer address in host - for PMD queues */;
+/* BD consumer address in host - for PMD queues */
+ struct regpair bd_cons_address;
};
+
/*
* Ramrod data for tx queue stop ramrod
*/
@@ -1092,16 +1337,19 @@ struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+
+
/*
* Ramrod data for vport update ramrod
*/
struct vport_filter_update_ramrod_data {
- struct eth_filter_cmd_header filter_cmd_hdr
- /* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */;
- struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]
- /* Filter Commands */;
+/* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */
+ struct eth_filter_cmd_header filter_cmd_hdr;
+/* Filter Commands */
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
};
+
/*
* Ramrod data for vport start ramrod
*/
@@ -1113,25 +1361,35 @@ struct vport_start_ramrod_data {
u8 inner_vlan_removal_en;
struct eth_vport_rx_mode rx_mode /* Rx filter data */;
struct eth_vport_tx_mode tx_mode /* Tx filter data */;
- struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */
- ;
+/* TPA configuration parameters */
+ struct eth_vport_tpa_param tpa_param;
__le16 default_vlan /* Default Vlan value to be forced by FW */;
u8 tx_switching_en /* Tx switching is enabled for current Vport */;
- u8 anti_spoofing_en
- /* Anti-spoofing verification is set for current Vport */;
- u8 default_vlan_en
- /* If set, the default Vlan value is forced by the FW */;
- u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
- ;
+/* Anti-spoofing verification is set for current Vport */
+ u8 anti_spoofing_en;
+/* If set, the default Vlan value is forced by the FW */
+ u8 default_vlan_en;
+/* If set, the vport handles PTP Timesync Packets */
+ u8 handle_ptp_pkts;
+/* If enabled then innerVlan will be stripped and not written to cqe */
u8 silent_vlan_removal_en;
- /* If enable then innerVlan will be striped and not written to cqe */
+/* If set untagged filter (vlan0) is added to current Vport, otherwise port is
+ * marked as any-vlan
+ */
u8 untagged;
- struct eth_tx_err_vals tx_err_behav
- /* Desired behavior per TX error type */;
+/* Desired behavior per TX error type */
+ struct eth_tx_err_vals tx_err_behav;
+/* If set, ETH header padding will not be inserted. placement_offset will be
+ * zero.
+ */
u8 zero_placement_offset;
- u8 reserved[7];
+/* If set, control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[5];
};
+
/*
* Ramrod data for vport stop ramrod
*/
@@ -1140,6 +1398,7 @@ struct vport_stop_ramrod_data {
u8 reserved[7];
};
+
/*
* Ramrod data for vport update ramrod
*/
@@ -1151,37 +1410,41 @@ struct vport_update_ramrod_data_cmn {
u8 tx_active_flg /* tx active flag value */;
u8 update_rx_mode_flg /* set if rx state data should be handled */;
u8 update_tx_mode_flg /* set if tx state data should be handled */;
- u8 update_approx_mcast_flg
- /* set if approx. mcast data should be handled */;
+/* set if approx. mcast data should be handled */
+ u8 update_approx_mcast_flg;
u8 update_rss_flg /* set if rss data should be handled */;
- u8 update_inner_vlan_removal_en_flg
- /* set if inner_vlan_removal_en should be handled */;
+/* set if inner_vlan_removal_en should be handled */
+ u8 update_inner_vlan_removal_en_flg;
u8 inner_vlan_removal_en;
+/* set if tpa parameters should be handled, TPA must be disabled before */
u8 update_tpa_param_flg;
u8 update_tpa_en_flg /* set if tpa enable changes */;
- u8 update_tx_switching_en_flg
- /* set if tx switching en flag should be handled */;
+/* set if tx switching en flag should be handled */
+ u8 update_tx_switching_en_flg;
u8 tx_switching_en /* tx switching en value */;
- u8 update_anti_spoofing_en_flg
- /* set if anti spoofing flag should be handled */;
+/* set if anti spoofing flag should be handled */
+ u8 update_anti_spoofing_en_flg;
u8 anti_spoofing_en /* Anti-spoofing verification en value */;
- u8 update_handle_ptp_pkts
- /* set if handle_ptp_pkts should be handled. */;
- u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */
- ;
- u8 update_default_vlan_en_flg
- /* If set, the default Vlan enable flag is updated */;
- u8 default_vlan_en
- /* If set, the default Vlan value is forced by the FW */;
- u8 update_default_vlan_flg
- /* If set, the default Vlan value is updated */;
+/* set if handle_ptp_pkts should be handled. */
+ u8 update_handle_ptp_pkts;
+/* If set, the vport handles PTP Timesync Packets */
+ u8 handle_ptp_pkts;
+/* If set, the default Vlan enable flag is updated */
+ u8 update_default_vlan_en_flg;
+/* If set, the default Vlan value is forced by the FW */
+ u8 default_vlan_en;
+/* If set, the default Vlan value is updated */
+ u8 update_default_vlan_flg;
__le16 default_vlan /* Default Vlan value to be forced by FW */;
- u8 update_accept_any_vlan_flg
- /* set if accept_any_vlan should be handled */;
+/* set if accept_any_vlan should be handled */
+ u8 update_accept_any_vlan_flg;
u8 accept_any_vlan /* accept_any_vlan updated value */;
+/* Set to remove the vlan silently; update_inner_vlan_removal_en_flg must be
+ * enabled as well. If Rx is in noSgl mode, send rx_queue_update_ramrod_data
+ */
u8 silent_vlan_removal_en;
- u8 update_mtu_flg
- /* If set, MTU will be updated. Vport must be not active. */;
+/* If set, MTU will be updated. Vport must be not active. */
+ u8 update_mtu_flg;
__le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
u8 reserved[2];
};
@@ -1194,54 +1457,76 @@ struct vport_update_ramrod_mcast {
* Ramrod data for vport update ramrod
*/
struct vport_update_ramrod_data {
- struct vport_update_ramrod_data_cmn common
- /* Common data for all vport update ramrods */;
+/* Common data for all vport update ramrods */
+ struct vport_update_ramrod_data_cmn common;
struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */;
struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */;
- struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */
- ;
+/* TPA configuration parameters */
+ struct eth_vport_tpa_param tpa_param;
struct vport_update_ramrod_mcast approx_mcast;
struct eth_vport_rss_config rss_config /* rss config data */;
};
+
+
+
+
+
/*
* GFT CAM line struct
*/
struct gft_cam_line {
__le32 camline;
+/* Indication if the line is valid. */
#define GFT_CAM_LINE_VALID_MASK 0x1
#define GFT_CAM_LINE_VALID_SHIFT 0
+/* Data bits, the word that is compared with the profile key */
#define GFT_CAM_LINE_DATA_MASK 0x3FFF
#define GFT_CAM_LINE_DATA_SHIFT 1
+/* Mask bits, indicating the bits in the data that are don't-care */
#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF
#define GFT_CAM_LINE_MASK_BITS_SHIFT 15
#define GFT_CAM_LINE_RESERVED1_MASK 0x7
#define GFT_CAM_LINE_RESERVED1_SHIFT 29
};
+
/*
* GFT CAM line struct (for driversim use)
*/
struct gft_cam_line_mapped {
__le32 camline;
+/* Indication if the line is valid. */
#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+/* use enum gft_profile_upper_protocol_type
+ * (use enum gft_profile_upper_protocol_type)
+ */
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+/* use enum gft_profile_upper_protocol_type
+ * (use enum gft_profile_upper_protocol_type)
+ */
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
@@ -1250,11 +1535,13 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
+
union gft_cam_line_union {
struct gft_cam_line cam_line;
struct gft_cam_line_mapped cam_line_mapped;
};
+
/*
* Used in gft_profile_key: Indication for ip version
*/
@@ -1264,17 +1551,24 @@ enum gft_profile_ip_version {
MAX_GFT_PROFILE_IP_VERSION
};
+
/*
 * Profile key struct for GFT logic in Prs
*/
struct gft_profile_key {
__le16 profile_key;
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+/* use enum gft_profile_ip_version (use enum gft_profile_ip_version) */
#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+/* use enum gft_profile_upper_protocol_type
+ * (use enum gft_profile_upper_protocol_type)
+ */
#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+/* use enum gft_profile_tunnel_type (use enum gft_profile_tunnel_type) */
#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
@@ -1283,6 +1577,7 @@ struct gft_profile_key {
#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
};
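
A hedged sketch of composing a profile key for a non-tunneled IPv4/UDP flow on a given PF, using the masks/shifts above. GFT_PROFILE_IPV4, GFT_PROFILE_UDP_PROTOCOL and GFT_PROFILE_NO_TUNNEL are illustrative enumerator names (the real ones come from the enums referenced above), and GFT_PROFILE_KEY_PF_ID_SHIFT is assumed to pair with the visible PF_ID mask:

#include <stdint.h>

static uint16_t build_gft_profile_key(uint8_t pf_id)
{
	uint16_t key = 0;

	key |= (GFT_PROFILE_IPV4 & GFT_PROFILE_KEY_IP_VERSION_MASK)
		<< GFT_PROFILE_KEY_IP_VERSION_SHIFT;
	key |= (GFT_PROFILE_UDP_PROTOCOL & GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK)
		<< GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT;
	key |= (GFT_PROFILE_NO_TUNNEL & GFT_PROFILE_KEY_TUNNEL_TYPE_MASK)
		<< GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT;
	key |= (pf_id & GFT_PROFILE_KEY_PF_ID_MASK)
		<< GFT_PROFILE_KEY_PF_ID_SHIFT;

	return key;	/* convert to __le16 before programming */
}
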
+
/*
* Used in gft_profile_key: Indication for tunnel type
*/
@@ -1296,6 +1591,7 @@ enum gft_profile_tunnel_type {
MAX_GFT_PROFILE_TUNNEL_TYPE
};
+
/*
* Used in gft_profile_key: Indication for protocol type
*/
@@ -1319,11 +1615,13 @@ enum gft_profile_upper_protocol_type {
MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
};
+
/*
* GFT RAM line struct
*/
struct gft_ram_line {
__le32 low32bits;
+/* (use enum gft_vlan_select) */
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
@@ -1411,6 +1709,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_RESERVED1_SHIFT 10
};
+
/*
* Used in the first 2 bits for gft_ram_line: Indication for vlan mask
*/
@@ -1422,36 +1721,39 @@ enum gft_vlan_select {
MAX_GFT_VLAN_SELECT
};
+
struct mstorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0 /* word0 */;
__le16 word1 /* word1 */;
@@ -1459,224 +1761,322 @@ struct mstorm_eth_conn_ag_ctx {
__le32 reg1 /* reg1 */;
};
-/* @DPDK: xstormEthConnAgCtxDqExtLdPart */
-struct xstorm_eth_conn_ag_ctx_dq_ext_ld_part {
+
+
+
+struct xstormEthConnAgCtxDqExtLdPart {
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+/* cf_array_active */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+/* bit6 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+/* bit7 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
u8 flags1;
+/* bit8 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+/* bit9 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+/* bit10 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+/* bit11 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+/* bit12 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+/* bit13 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+/* bit14 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
+/* timer0cf */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+/* timer1cf */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+/* timer2cf */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+/* timer_stop_all */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
u8 flags3;
+/* cf4 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+/* cf5 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+/* cf6 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+/* cf7 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
u8 flags4;
+/* cf8 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+/* cf9 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+/* cf10 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+/* cf11 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
u8 flags5;
+/* cf12 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+/* cf13 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+/* cf14 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+/* cf15 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
u8 flags6;
+/* cf16 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+/* cf19 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
u8 flags7;
+/* cf20 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+/* cf21 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+/* cf22 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+/* cf0en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+/* cf1en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
u8 flags8;
+/* cf2en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+/* cf3en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+/* cf4en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+/* cf5en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+/* cf6en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+/* cf7en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+/* cf8en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+/* cf9en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
u8 flags9;
+/* cf10en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+/* cf11en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+/* cf12en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+/* cf13en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+/* cf14en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+/* cf15en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+/* cf16en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
+/* cf18en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+/* cf19en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+/* cf22en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+/* rule1en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
u8 flags11;
+/* rule2en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+/* rule3en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+/* rule4en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+/* rule6en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+/* rule7en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+/* rule8en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+/* rule9en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
u8 flags12;
+/* rule10en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+/* rule11en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+/* rule12en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+/* rule13en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+/* rule14en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+/* rule15en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+/* rule16en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+/* rule17en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
u8 flags13;
+/* rule18en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+/* rule19en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+/* rule20en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+/* rule21en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+/* rule22en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+/* rule23en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+/* rule24en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+/* rule25en */
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
u8 flags14;
+/* bit16 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
u8 edpm_event_id /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
+ __le16 quota /* physical_q1 */;
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
+ __le16 tx_class /* word5 */;
__le16 conn_dpi /* conn_dpi */;
u8 byte3 /* byte3 */;
u8 byte4 /* byte4 */;
@@ -1689,224 +2089,323 @@ struct xstorm_eth_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4 /* reg4 */;
};
+
+
struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 flags0;
+/* exist_in_qm0 */
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+/* bit6 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+/* bit7 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
+/* bit8 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+/* bit9 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
+/* timer0cf */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
+/* cf4 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+/* cf5 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+/* cf6 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+/* cf7 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
+/* cf8 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+/* cf9 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+/* cf10 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+/* cf11 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
+/* cf12 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+/* cf13 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+/* cf14 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+/* cf15 */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
+/* cf16 */
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+/* cf19 */
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
+/* cf20 */
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+/* cf21 */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+/* cf22 */
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
+/* cf2en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
+/* cf10en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
+/* cf18en */
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
+/* rule2en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
+/* rule10en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
+/* rule18en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
+/* bit16 */
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
+ __le16 quota /* physical_q1 */;
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
+ __le16 tx_class /* word5 */;
__le16 conn_dpi /* conn_dpi */;
};
+
#endif /* __ECORE_HSI_ETH__ */
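The hunks above mostly annotate long runs of *_MASK/*_SHIFT pairs that describe sub-fields packed into the flags* bytes of the Ethernet aggregative-context structures. A minimal sketch of how such pairs are typically consumed follows; the HSI_GET_FIELD/HSI_SET_FIELD helpers and the main() harness are local to the sketch (the ecore base code provides its own equivalents), and only the two mask/shift pairs copied from struct mstorm_eth_conn_ag_ctx are taken from the header.

/*
 * Illustrative sketch only: combining a <NAME>_MASK / <NAME>_SHIFT pair to
 * read and write a packed flags byte.
 */
#include <stdint.h>
#include <stdio.h>

/* Local helpers for this sketch; the driver ships its own equivalents. */
#define HSI_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define HSI_SET_FIELD(value, name, val) \
	((value) = (uint8_t)(((value) & ~(name##_MASK << name##_SHIFT)) | \
			     (((val) & name##_MASK) << name##_SHIFT)))

/* Two flags0 fields copied from struct mstorm_eth_conn_ag_ctx above. */
#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK  0x1
#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK           0x3
#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT          2

int main(void)
{
	uint8_t flags0 = 0;

	HSI_SET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0, 1);
	HSI_SET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_CF0, 2);

	/* Prints flags0=0x09 cf0=2 */
	printf("flags0=0x%02x cf0=%u\n", flags0,
	       (unsigned)HSI_GET_FIELD(flags0, MSTORM_ETH_CONN_AG_CTX_CF0));
	return 0;
}

The same pattern applies to every flags0..flags14 byte of the xstorm structures above; only the field names and widths change.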
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
new file mode 100644
index 00000000..fca74791
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_INIT_FUNC__
+#define __ECORE_HSI_INIT_FUNC__
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+
+/*
+ * BRB RAM init requirements
+ */
+struct init_brb_ram_req {
+ __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
+ __le32 headroom_per_tc /* headroom size per TC, in bytes */;
+ __le32 min_pkt_size /* min packet size, in bytes */;
+ __le32 max_ports_per_engine /* max number of ports per engine */;
+ u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
+};
+
+
+/*
+ * ETS per-TC init requirements
+ */
+struct init_ets_tc_req {
+/* if set, this TC participates in the arbitration with a strict priority
+ * (the priority is equal to the TC ID)
+ */
+ u8 use_sp;
+/* if set, this TC participates in the arbitration with a WFQ weight
+ * (indicated by the weight field)
+ */
+ u8 use_wfq;
+/* An arbitration weight. Valid only if use_wfq is set. */
+ __le16 weight;
+};
+
+/*
+ * ETS init requirements
+ */
+struct init_ets_req {
+ __le32 mtu /* Max packet size (in bytes) */;
+/* ETS initialization requirements per TC. */
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+
+
+/*
+ * NIG LB RL init requirements
+ */
+struct init_nig_lb_rl_req {
+/* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
+ __le16 lb_mac_rate;
+/* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */
+ __le16 lb_rate;
+ __le32 mtu /* Max packet size (in bytes) */;
+/* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */
+ __le16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+
+/*
+ * NIG TC mapping for each priority
+ */
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id /* the mapped TC ID */;
+ u8 valid /* indicates if the mapping entry is valid */;
+};
+
+
+/*
+ * NIG priority to TC map init requirements
+ */
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+
+/*
+ * QM per-port init parameters
+ */
+struct init_qm_port_params {
+ u8 active /* Indicates if this port is active */;
+/* Vector of valid bits for active TCs used by this port */
+ u8 active_phys_tcs;
+/* number of PBF command lines that can be used by this port */
+ __le16 num_pbf_cmd_lines;
+/* number of BTB blocks that can be used by this port */
+ __le16 num_btb_blocks;
+ __le16 reserved;
+};
+
+
+/*
+ * QM per-PQ init parameters
+ */
+struct init_qm_pq_params {
+ u8 vport_id /* VPORT ID */;
+ u8 tc_id /* TC ID */;
+ u8 wrr_group /* WRR group */;
+/* Indicates if a rate limiter should be allocated for the PQ (0/1) */
+ u8 rl_valid;
+};
+
+
+/*
+ * QM per-vport init parameters
+ */
+struct init_qm_vport_params {
+/* rate limit in Mb/sec units. A value of 0 means don't configure. Ignored if
+ * VPORT RL is globally disabled.
+ */
+ __le32 vport_rl;
+/* WFQ weight. A value of 0 means don't configure. Ignored if VPORT WFQ is
+ * globally disabled.
+ */
+ __le16 vport_wfq;
+/* the first Tx PQ ID associated with this VPORT for each TC. */
+ __le16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+#endif /* __ECORE_HSI_INIT_FUNC__ */
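ecore_hsi_init_func.h mainly carries plain request structures that the init code fills in before programming the BRB, ETS, NIG and QM blocks. A small hypothetical sketch of filling the ETS request follows; the u8/__le16/__le32 typedefs, the NUM_OF_TCS value of 8 and the sample weights are assumptions of the sketch, while the two struct definitions are copied from the header above (endianness conversion is omitted for brevity).

/*
 * Hypothetical sketch: filling the ETS init request defined above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint8_t  u8;
typedef uint16_t __le16;
typedef uint32_t __le32;

#define NUM_OF_TCS 8	/* assumed value for this sketch */

/* Copied from ecore_hsi_init_func.h above. */
struct init_ets_tc_req {
	u8 use_sp;	/* strict-priority arbitration for this TC */
	u8 use_wfq;	/* WFQ arbitration for this TC */
	__le16 weight;	/* WFQ weight, valid only if use_wfq is set */
};

struct init_ets_req {
	__le32 mtu;	/* max packet size in bytes */
	struct init_ets_tc_req tc_req[NUM_OF_TCS];
};

int main(void)
{
	struct init_ets_req req;
	int tc;

	memset(&req, 0, sizeof(req));
	req.mtu = 9600;

	/* TC 0 is strict priority; the rest share bandwidth by WFQ weight. */
	req.tc_req[0].use_sp = 1;
	for (tc = 1; tc < NUM_OF_TCS; tc++) {
		req.tc_req[tc].use_wfq = 1;
		req.tc_req[tc].weight = 10;
	}

	printf("ETS request: mtu=%u, %d WFQ TCs\n",
	       (unsigned)req.mtu, NUM_OF_TCS - 1);
	return 0;
}

In the real driver the populated request would be handed to the corresponding ETS init helper in the base code rather than printed.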
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
new file mode 100644
index 00000000..410b0bcb
--- /dev/null
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#ifndef __ECORE_HSI_INIT_TOOL__
+#define __ECORE_HSI_INIT_TOOL__
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
+
+struct fw_asserts_ram_section {
+/* The offset of the section in the RAM in RAM lines (64-bit units) */
+ __le16 section_ram_line_offset;
+/* The size of the section in RAM lines (64-bit units) */
+ __le16 section_ram_line_size;
+/* The offset of the asserts list within the section in dwords */
+ u8 list_dword_offset;
+/* The size of an assert list element in dwords */
+ u8 list_element_dword_size;
+ u8 list_num_elements /* The number of elements in the asserts list */;
+/* The offset of the next list index field within the section in dwords */
+ u8 list_next_index_dword_offset;
+};
+
+
+struct fw_ver_num {
+ u8 major /* Firmware major version number */;
+ u8 minor /* Firmware minor version number */;
+ u8 rev /* Firmware revision version number */;
+/* Firmware engineering version number (for bootleg versions) */
+ u8 eng;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver /* Tools version number */;
+ u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+ u8 reserved1;
+ struct fw_ver_num num /* FW version number */;
+ __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver /* FW version information */;
+/* Info regarding the FW asserts section in the Storm RAM */
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+
+struct fw_info_location {
+/* GRC address where the fw_info struct is located. */
+ __le32 grc_addr;
+/* Size of the fw_info structure (that's located at the grc_addr). */
+ __le32 size;
+};
+
+
+
+
+enum init_modes {
+ MODE_BB_A0,
+ MODE_BB_B0,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_40G,
+ MODE_EAGLE_ENG1_WORKAROUND,
+ MAX_INIT_MODES
+};
+
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
+
+/*
+ * Binary buffer header
+ */
+struct bin_buffer_hdr {
+/* buffer offset in bytes from the beginning of the binary file */
+ __le32 offset;
+ __le32 length /* buffer length in bytes */;
+};
+
+
+/*
+ * binary init buffer types
+ */
+enum bin_init_buffer_type {
+ BIN_BUF_INIT_FW_VER_INFO /* fw_ver_info struct */,
+ BIN_BUF_INIT_CMD /* init commands */,
+ BIN_BUF_INIT_VAL /* init data */,
+ BIN_BUF_INIT_MODE_TREE /* init modes tree */,
+ BIN_BUF_INIT_IRO /* internal RAM offsets */,
+ MAX_BIN_INIT_BUFFER_TYPE
+};
+
+
+/*
+ * init array header: raw
+ */
+struct init_array_raw_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+/* init array params */
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/*
+ * init array header: standard
+ */
+struct init_array_standard_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+/* Init array size (in dwords) */
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: zipped
+ */
+struct init_array_zipped_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+/* Init array zipped size (in bytes) */
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/*
+ * init array header: pattern
+ */
+struct init_array_pattern_hdr {
+ __le32 data;
+/* Init array type, from init_array_types enum */
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+/* pattern size in dword */
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+/* pattern repetitions */
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+/*
+ * init array header union
+ */
+union init_array_hdr {
+ struct init_array_raw_hdr raw /* raw init array header */;
+/* standard init array header */
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped /* zipped init array header */;
+ struct init_array_pattern_hdr pattern /* pattern init array header */;
+};
+
+
+
+
+
+/*
+ * init array types
+ */
+enum init_array_types {
+ INIT_ARR_STANDARD /* standard init array */,
+ INIT_ARR_ZIPPED /* zipped init array */,
+ INIT_ARR_PATTERN /* a repeated pattern */,
+ MAX_INIT_ARRAY_TYPES
+};
+
+
+
+/*
+ * init operation: callback
+ */
+struct init_callback_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id /* Callback ID */;
+ __le16 block_id /* Blocks ID */;
+};
+
+
+/*
+ * init operation: delay
+ */
+struct init_delay_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay /* delay in us */;
+};
+
+
+/*
+ * init operation: if_mode
+ */
+struct init_if_mode_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+/* Commands to skip if the modes don't match */
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+/* offset (in bytes) in modes expression buffer */
+ __le16 modes_buf_offset;
+};
+
+
+/*
+ * init operation: if_phase
+ */
+struct init_if_phase_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+/* Indicates if DMAE is enabled in this phase */
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+/* Commands to skip if the phases don't match */
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+
+/*
+ * init mode operators
+ */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT /* init mode not operator */,
+ INIT_MODE_OP_OR /* init mode or operator */,
+ INIT_MODE_OP_AND /* init mode and operator */,
+ MAX_INIT_MODE_OPS
+};
+
+
+/*
+ * init operation: raw
+ */
+struct init_raw_op {
+ __le32 op_data;
+/* Init operation, from init_op_types enum */
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2 /* Init param 2 */;
+};
+
+/*
+ * init array params
+ */
+struct init_op_array_params {
+ __le16 size /* array size in dwords */;
+ __le16 offset /* array start offset in dwords */;
+};
+
+/*
+ * Write init operation arguments
+ */
+union init_write_args {
+/* value to write, used when init source is INIT_SRC_INLINE */
+ __le32 inline_val;
+/* number of zeros to write, used when init source is INIT_SRC_ZEROS */
+ __le32 zeros_count;
+/* array offset to write, used when init source is INIT_SRC_ARRAY */
+ __le32 array_offset;
+/* runtime array params to write, used when init source is INIT_SRC_RUNTIME */
+ struct init_op_array_params runtime;
+};
+
+/*
+ * init operation: write
+ */
+struct init_write_op {
+ __le32 data;
+/* init operation, from init_op_types enum */
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+/* init source type, taken from init_source_types enum */
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+/* indicates if the register is wide-bus */
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+/* internal (absolute) GRC address, in dwords */
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args /* Write init operation arguments */;
+};
+
+/*
+ * init operation: read
+ */
+struct init_read_op {
+ __le32 op_data;
+/* init operation, from init_op_types enum */
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+/* polling type, from init_poll_types enum */
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+/* internal (absolute) GRC address, in dwords */
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+/* expected polling value, used only when polling is done */
+ __le32 expected_val;
+};
+
+/*
+ * Init operations union
+ */
+union init_op {
+ struct init_raw_op raw /* raw init operation */;
+ struct init_write_op write /* write init operation */;
+ struct init_read_op read /* read init operation */;
+ struct init_if_mode_op if_mode /* if_mode init operation */;
+ struct init_if_phase_op if_phase /* if_phase init operation */;
+ struct init_callback_op callback /* callback init operation */;
+ struct init_delay_op delay /* delay init operation */;
+};
+
+
+
+/*
+ * Init command operation types
+ */
+enum init_op_types {
+ INIT_OP_READ /* GRC read init command */,
+ INIT_OP_WRITE /* GRC write init command */,
+/* Skip init commands if the init modes expression doesn't match */
+ INIT_OP_IF_MODE,
+/* Skip init commands if the init phase doesn't match */
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY /* delay init command */,
+ INIT_OP_CALLBACK /* callback init command */,
+ MAX_INIT_OP_TYPES
+};
+
+
+/*
+ * init polling types
+ */
+enum init_poll_types {
+ INIT_POLL_NONE /* No polling */,
+ INIT_POLL_EQ /* poll until the value equals the expected value */,
+ INIT_POLL_OR /* poll using an OR comparison with the expected value */,
+ INIT_POLL_AND /* poll using an AND comparison with the expected value */,
+ MAX_INIT_POLL_TYPES
+};
+
+
+
+
+/*
+ * init source types
+ */
+enum init_source_types {
+ INIT_SRC_INLINE /* init value is included in the init command */,
+ INIT_SRC_ZEROS /* init value is all zeros */,
+ INIT_SRC_ARRAY /* init value is an array of values */,
+ INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ MAX_INIT_SOURCE_TYPES
+};
+
+
+
+
+/*
+ * Internal RAM Offsets macro data
+ */
+struct iro {
+ __le32 base /* RAM field offset */;
+ __le16 m1 /* multiplier 1 */;
+ __le16 m2 /* multiplier 2 */;
+ __le16 m3 /* multiplier 3 */;
+ __le16 size /* RAM field size */;
+};
+
+#endif /* __ECORE_HSI_INIT_TOOL__ */
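The init-tool header encodes each init command as packed dwords whose layout is given by the *_MASK/*_SHIFT pairs above. A hypothetical decoding sketch for struct init_write_op follows; the local GET_FIELD macro and the sample data value are assumptions, while the mask/shift names and the INIT_OP_WRITE (1) / INIT_SRC_INLINE (0) ordinals come from the enums in the header. Note that the address field is in dwords, so a byte address requires a multiply by 4.

/*
 * Hypothetical sketch: decoding the packed dword of struct init_write_op.
 */
#include <stdint.h>
#include <stdio.h>

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

/* Copied from struct init_write_op above. */
#define INIT_WRITE_OP_OP_MASK       0xF
#define INIT_WRITE_OP_OP_SHIFT      0
#define INIT_WRITE_OP_SOURCE_MASK   0x7
#define INIT_WRITE_OP_SOURCE_SHIFT  4
#define INIT_WRITE_OP_ADDRESS_MASK  0x7FFFFF
#define INIT_WRITE_OP_ADDRESS_SHIFT 9

int main(void)
{
	/* Sample command only: op INIT_OP_WRITE, source INIT_SRC_INLINE,
	 * GRC address 0x1000 (in dwords).
	 */
	uint32_t data = (1u << INIT_WRITE_OP_OP_SHIFT) |
			(0u << INIT_WRITE_OP_SOURCE_SHIFT) |
			(0x1000u << INIT_WRITE_OP_ADDRESS_SHIFT);

	printf("op=%u source=%u grc_byte_addr=0x%x\n",
	       (unsigned)GET_FIELD(data, INIT_WRITE_OP_OP),
	       (unsigned)GET_FIELD(data, INIT_WRITE_OP_SOURCE),
	       (unsigned)(GET_FIELD(data, INIT_WRITE_OP_ADDRESS) * 4));
	return 0;
}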
diff --git a/drivers/net/qede/base/ecore_hsi_tools.h b/drivers/net/qede/base/ecore_hsi_tools.h
deleted file mode 100644
index 18eea762..00000000
--- a/drivers/net/qede/base/ecore_hsi_tools.h
+++ /dev/null
@@ -1,1081 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#ifndef __ECORE_HSI_TOOLS__
-#define __ECORE_HSI_TOOLS__
-/**********************************/
-/* Tools HSI constants and macros */
-/**********************************/
-
-/*********************************** Init ************************************/
-
-/* Width of GRC address in bits (addresses are specified in dwords) */
-#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
-
-/* indicates an init that should be applied to any phase ID */
-#define ANY_PHASE_ID 0xffff
-
-/* init pattern size in bytes */
-#define INIT_PATTERN_SIZE_BITS 4
-#define MAX_INIT_PATTERN_SIZE (1 << INIT_PATTERN_SIZE_BITS)
-
-/* Max size in dwords of a zipped array */
-#define MAX_ZIPPED_SIZE 8192
-
-/* Global PXP window */
-#define NUM_OF_PXP_WIN 19
-#define PXP_WIN_DWORD_SIZE_BITS 10
-#define PXP_WIN_DWORD_SIZE (1 << PXP_WIN_DWORD_SIZE_BITS)
-#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
-#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
-
-/********************************* GRC Dump **********************************/
-
-/* width of GRC dump register sequence length in bits */
-#define DUMP_SEQ_LEN_BITS 8
-#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
-
-/* width of GRC dump memory length in bits */
-#define DUMP_MEM_LEN_BITS 18
-#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
-
-/* width of register type ID in bits */
-#define REG_TYPE_ID_BITS 6
-#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
-
-/* width of block ID in bits */
-#define BLOCK_ID_BITS 8
-#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
-
-/******************************** Idle Check *********************************/
-
-/* max number of idle check predicate immediates */
-#define MAX_IDLE_CHK_PRED_IMM 3
-
-/* max number of idle check argument registers */
-#define MAX_IDLE_CHK_READ_REGS 3
-
-/* max number of idle check loops */
-#define MAX_IDLE_CHK_LOOPS 0x10000
-
-/* max idle check address increment */
-#define MAX_IDLE_CHK_INCREMENT 0x10000
-
-/* inicates an undefined idle check line index */
-#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
-
-/* max number of register values following the idle check header for LSI */
-#define IDLE_CHK_MAX_LSI_DUMP_REGS 2
-
-/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
-#define IDLE_CHK_QM_RD_WR_PTR 0
-#define IDLE_CHK_QM_RD_WR_BANK 1
-
-/**************************************/
-/* HSI Functions constants and macros */
-/**************************************/
-
-/* Number of VLAN priorities */
-#define NUM_OF_VLAN_PRIORITIES 8
-
-/* the MCP Trace meta data signautre is duplicated in the
- * perl script that generats the NVRAM images
- */
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
-
-/* Maximal number of RAM lines occupied by FW Asserts data */
-#define MAX_FW_ASSERTS_RAM_LINES 800
-
-/*
- * Binary buffer header
- */
-struct bin_buffer_hdr {
- __le32 offset
- /* buffer offset in bytes from the beginning of the binary file */;
- __le32 length /* buffer length in bytes */;
-};
-
-/*
- * binary buffer types
- */
-enum bin_buffer_type {
- BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
- BIN_BUF_INIT_CMD /* init commands */,
- BIN_BUF_INIT_VAL /* init data */,
- BIN_BUF_INIT_MODE_TREE /* init modes tree */,
- BIN_BUF_IRO /* internal RAM offsets array */,
- MAX_BIN_BUFFER_TYPE
-};
-
-/*
- * Chip IDs
- */
-enum chip_ids {
- CHIP_BB_A0 /* BB A0 chip ID */,
- CHIP_BB_B0 /* BB B0 chip ID */,
- CHIP_K2 /* AH chip ID */,
- MAX_CHIP_IDS
-};
-
-/*
- * memory dump descriptor
- */
-struct dbg_dump_mem_desc {
- __le32 dword0;
-#define DBG_DUMP_MEM_DESC_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_DESC_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_MASK 0xF
-#define DBG_DUMP_MEM_DESC_ASIC_CHIP_MASK_SHIFT 24
-#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_MASK 0xF
-#define DBG_DUMP_MEM_DESC_SIM_CHIP_MASK_SHIFT 28
- __le32 dword1;
-#define DBG_DUMP_MEM_DESC_LENGTH_MASK 0x3FFFF
-#define DBG_DUMP_MEM_DESC_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_MASK 0x3F
-#define DBG_DUMP_MEM_DESC_REG_TYPE_ID_SHIFT 18
-#define DBG_DUMP_MEM_DESC_BLOCK_ID_MASK 0xFF
-#define DBG_DUMP_MEM_DESC_BLOCK_ID_SHIFT 24
-};
-
-/*
- * registers dump descriptor: chip
- */
-struct dbg_dump_regs_chip_desc {
- __le32 data;
-#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_MASK 0x1
-#define DBG_DUMP_REGS_CHIP_DESC_IS_CHIP_MASK_SHIFT 0
-#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_MASK 0x7FFFFF
-#define DBG_DUMP_REGS_CHIP_DESC_ASIC_CHIP_MASK_SHIFT 1
-#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_MASK 0xFF
-#define DBG_DUMP_REGS_CHIP_DESC_SIM_CHIP_MASK_SHIFT 24
-};
-
-/*
- * registers dump descriptor: raw
- */
-struct dbg_dump_regs_raw_desc {
- __le32 data;
-#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_MASK 0x1
-#define DBG_DUMP_REGS_RAW_DESC_IS_CHIP_MASK_SHIFT 0
-#define DBG_DUMP_REGS_RAW_DESC_PARAM1_MASK 0x7FFFFF
-#define DBG_DUMP_REGS_RAW_DESC_PARAM1_SHIFT 1
-#define DBG_DUMP_REGS_RAW_DESC_PARAM2_MASK 0xFF
-#define DBG_DUMP_REGS_RAW_DESC_PARAM2_SHIFT 24
-};
-
-/*
- * registers dump descriptor: sequence
- */
-struct dbg_dump_regs_seq_desc {
- __le32 data;
-#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_MASK 0x1
-#define DBG_DUMP_REGS_SEQ_DESC_IS_CHIP_MASK_SHIFT 0
-#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_MASK 0x7FFFFF
-#define DBG_DUMP_REGS_SEQ_DESC_ADDRESS_SHIFT 1
-#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_MASK 0xFF
-#define DBG_DUMP_REGS_SEQ_DESC_LENGTH_SHIFT 24
-};
-
-/*
- * registers dump descriptor
- */
-union dbg_dump_regs_desc {
- struct dbg_dump_regs_raw_desc raw /* dumped registers raw descriptor */
- ;
- struct dbg_dump_regs_seq_desc seq /* dumped registers seq descriptor */
- ;
- struct dbg_dump_regs_chip_desc chip
- /* dumped registers chip descriptor */;
-};
-
-/*
- * idle check macro types
- */
-enum idle_chk_macro_types {
- IDLE_CHK_MACRO_TYPE_COMPARE /* parametric register comparison */,
- IDLE_CHK_MACRO_TYPE_QM_RD_WR /* compare QM r/w pointers and banks */,
- MAX_IDLE_CHK_MACRO_TYPES
-};
-
-/*
- * Idle Check result header
- */
-struct idle_chk_result_hdr {
- __le16 rule_idx /* Idle check rule index in CSV file */;
- __le16 loop_idx /* the loop index in which the failure occurred */;
- __le16 num_fw_values;
- __le16 data;
-#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_MASK 0xF
-#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_SHIFT 0
-#define IDLE_CHK_RESULT_HDR_LOOP_VALID_MASK 0x1
-#define IDLE_CHK_RESULT_HDR_LOOP_VALID_SHIFT 4
-#define IDLE_CHK_RESULT_HDR_SEVERITY_MASK 0x7
-#define IDLE_CHK_RESULT_HDR_SEVERITY_SHIFT 5
-#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_MASK 0xF
-#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_SHIFT 8
-#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_MASK 0xF
-#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_SHIFT 12
-};
-
-/*
- * Idle Check rule
- */
-struct idle_chk_rule {
- __le32 data;
-#define IDLE_CHK_RULE_ASIC_CHIP_MASK_MASK 0xF
-#define IDLE_CHK_RULE_ASIC_CHIP_MASK_SHIFT 0
-#define IDLE_CHK_RULE_SIM_CHIP_MASK_MASK 0xF
-#define IDLE_CHK_RULE_SIM_CHIP_MASK_SHIFT 4
-#define IDLE_CHK_RULE_BLOCK_ID_MASK 0xFF
-#define IDLE_CHK_RULE_BLOCK_ID_SHIFT 8
-#define IDLE_CHK_RULE_MACRO_TYPE_MASK 0xF
-#define IDLE_CHK_RULE_MACRO_TYPE_SHIFT 16
-#define IDLE_CHK_RULE_SEVERITY_MASK 0x7
-#define IDLE_CHK_RULE_SEVERITY_SHIFT 20
-#define IDLE_CHK_RULE_RESERVED_MASK 0x1
-#define IDLE_CHK_RULE_RESERVED_SHIFT 23
-#define IDLE_CHK_RULE_PRED_ID_MASK 0xFF
-#define IDLE_CHK_RULE_PRED_ID_SHIFT 24
- __le16 loop;
- __le16 increment
- /* address increment of first argument register on each iteration */
- ;
- __le32 reg_addr[3];
- __le32 pred_imm[3]
- /* immediate values passed as arguments to the idle check rule */;
-};
-
-/*
- * idle check severity types
- */
-enum idle_chk_severity_types {
- IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */,
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC
- ,
- IDLE_CHK_SEVERITY_WARNING
- /* idle check failure should cause a warning */,
- MAX_IDLE_CHK_SEVERITY_TYPES
-};
-
-/*
- * init array header: raw
- */
-struct init_array_raw_hdr {
- __le32 data;
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
-#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
-};
-
-/*
- * init array header: standard
- */
-struct init_array_standard_hdr {
- __le32 data;
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
-};
-
-/*
- * init array header: zipped
- */
-struct init_array_zipped_hdr {
- __le32 data;
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
-};
-
-/*
- * init array header: pattern
- */
-struct init_array_pattern_hdr {
- __le32 data;
-#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
-};
-
-/*
- * init array header union
- */
-union init_array_hdr {
- struct init_array_raw_hdr raw /* raw init array header */;
- struct init_array_standard_hdr standard /* standard init array header */
- ;
- struct init_array_zipped_hdr zipped /* zipped init array header */;
- struct init_array_pattern_hdr pattern /* pattern init array header */;
-};
-
-/*
- * init array types
- */
-enum init_array_types {
- INIT_ARR_STANDARD /* standard init array */,
- INIT_ARR_ZIPPED /* zipped init array */,
- INIT_ARR_PATTERN /* a repeated pattern */,
- MAX_INIT_ARRAY_TYPES
-};
-
-/*
- * init operation: callback
- */
-struct init_callback_op {
- __le32 op_data;
-#define INIT_CALLBACK_OP_OP_MASK 0xF
-#define INIT_CALLBACK_OP_OP_SHIFT 0
-#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
-};
-
-/*
- * init operation: delay
- */
-struct init_delay_op {
- __le32 op_data;
-#define INIT_DELAY_OP_OP_MASK 0xF
-#define INIT_DELAY_OP_OP_SHIFT 0
-#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_DELAY_OP_RESERVED_SHIFT 4
- __le32 delay /* delay in us */;
-};
-
-/*
- * init operation: if_mode
- */
-struct init_if_mode_op {
- __le32 op_data;
-#define INIT_IF_MODE_OP_OP_MASK 0xF
-#define INIT_IF_MODE_OP_OP_SHIFT 0
-#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
-#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
- __le16 modes_buf_offset
- /* offset (in bytes) in modes expression buffer */;
-};
-
-/*
- * init operation: if_phase
- */
-struct init_if_phase_op {
- __le32 op_data;
-#define INIT_IF_PHASE_OP_OP_MASK 0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
- __le32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
-#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
-};
-
-/*
- * init mode operators
- */
-enum init_mode_ops {
- INIT_MODE_OP_NOT /* init mode not operator */,
- INIT_MODE_OP_OR /* init mode or operator */,
- INIT_MODE_OP_AND /* init mode and operator */,
- MAX_INIT_MODE_OPS
-};
-
-/*
- * init operation: raw
- */
-struct init_raw_op {
- __le32 op_data;
-#define INIT_RAW_OP_OP_MASK 0xF
-#define INIT_RAW_OP_OP_SHIFT 0
-#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
-#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
-};
-
-/*
- * init array params
- */
-struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
-};
-
-/*
- * Write init operation arguments
- */
-union init_write_args {
- __le32 inline_val
- /* value to write, used when init source is INIT_SRC_INLINE */;
- __le32 zeros_count;
- __le32 array_offset
- /* array offset to write, used when init source is INIT_SRC_ARRAY */
- ;
- struct init_op_array_params runtime;
-};
-
-/*
- * init operation: write
- */
-struct init_write_op {
- __le32 data;
-#define INIT_WRITE_OP_OP_MASK 0xF
-#define INIT_WRITE_OP_OP_SHIFT 0
-#define INIT_WRITE_OP_SOURCE_MASK 0x7
-#define INIT_WRITE_OP_SOURCE_SHIFT 4
-#define INIT_WRITE_OP_RESERVED_MASK 0x1
-#define INIT_WRITE_OP_RESERVED_SHIFT 7
-#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
-#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
-#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_WRITE_OP_ADDRESS_SHIFT 9
- union init_write_args args /* Write init operation arguments */;
-};
-
-/*
- * init operation: read
- */
-struct init_read_op {
- __le32 op_data;
-#define INIT_READ_OP_OP_MASK 0xF
-#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_TYPE_MASK 0xF
-#define INIT_READ_OP_POLL_TYPE_SHIFT 4
-#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 8
-#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_READ_OP_ADDRESS_SHIFT 9
- __le32 expected_val
- /* expected polling value, used only when polling is done */;
-};
-
-/*
- * Init operations union
- */
-union init_op {
- struct init_raw_op raw /* raw init operation */;
- struct init_write_op write /* write init operation */;
- struct init_read_op read /* read init operation */;
- struct init_if_mode_op if_mode /* if_mode init operation */;
- struct init_if_phase_op if_phase /* if_phase init operation */;
- struct init_callback_op callback /* callback init operation */;
- struct init_delay_op delay /* delay init operation */;
-};
-
-/*
- * Init command operation types
- */
-enum init_op_types {
- INIT_OP_READ /* GRC read init command */,
- INIT_OP_WRITE /* GRC write init command */,
- INIT_OP_IF_MODE
- /* Skip init commands if the init modes expression doesn't match */,
- INIT_OP_IF_PHASE
- /* Skip init commands if the init phase doesn't match */,
- INIT_OP_DELAY /* delay init command */,
- INIT_OP_CALLBACK /* callback init command */,
- MAX_INIT_OP_TYPES
-};
-
-/*
- * init polling types
- */
-enum init_poll_types {
- INIT_POLL_NONE /* No polling */,
- INIT_POLL_EQ /* init value is included in the init command */,
- INIT_POLL_OR /* init value is all zeros */,
- INIT_POLL_AND /* init value is an array of values */,
- MAX_INIT_POLL_TYPES
-};
-
-/*
- * init source types
- */
-enum init_source_types {
- INIT_SRC_INLINE /* init value is included in the init command */,
- INIT_SRC_ZEROS /* init value is all zeros */,
- INIT_SRC_ARRAY /* init value is an array of values */,
- INIT_SRC_RUNTIME /* init value is provided during runtime */,
- MAX_INIT_SOURCE_TYPES
-};
-
-/*
- * Internal RAM Offsets macro data
- */
-struct iro {
- __le32 base /* RAM field offset */;
- __le16 m1 /* multiplier 1 */;
- __le16 m2 /* multiplier 2 */;
- __le16 m3 /* multiplier 3 */;
- __le16 size /* RAM field size */;
-};
-
-/*
- * register descriptor
- */
-struct reg_desc {
- __le32 data;
-#define REG_DESC_ADDRESS_MASK 0xFFFFFF
-#define REG_DESC_ADDRESS_SHIFT 0
-#define REG_DESC_SIZE_MASK 0xFF
-#define REG_DESC_SIZE_SHIFT 24
-};
-
-/*
- * Debug Bus block data
- */
-struct dbg_bus_block_data {
- u8 enabled /* Indicates if the block is enabled for recording (0/1) */;
- u8 hw_id /* HW ID associated with the block */;
- u8 line_num /* Debug line number to select */;
- u8 right_shift /* Number of units to right the debug data (0-3) */;
- u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
- u8 force_valid /* 4-bit value: bit i set -> unit i is forced valid. */;
- u8 force_frame
- /* 4-bit value: bit i set -> unit i frame bit is forced. */;
- u8 reserved;
-};
-
-/*
- * Debug Bus Clients
- */
-enum dbg_bus_clients {
- DBG_BUS_CLIENT_RBCN,
- DBG_BUS_CLIENT_RBCP,
- DBG_BUS_CLIENT_RBCR,
- DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCF,
- DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_OTHER_ENGINE,
- DBG_BUS_CLIENT_TIMESTAMP,
- DBG_BUS_CLIENT_CPU,
- DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCQ,
- DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCV,
- MAX_DBG_BUS_CLIENTS
-};
-
-/*
- * Debug Bus constraint operation types
- */
-enum dbg_bus_constraint_ops {
- DBG_BUS_CONSTRAINT_OP_EQ /* equal */,
- DBG_BUS_CONSTRAINT_OP_NE /* not equal */,
- DBG_BUS_CONSTRAINT_OP_LT /* less than */,
- DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */,
- DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */,
- DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */,
- DBG_BUS_CONSTRAINT_OP_GT /* greater than */,
- DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */,
- DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */,
- DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */,
- MAX_DBG_BUS_CONSTRAINT_OPS
-};
-
-/*
- * Debug Bus memory address
- */
-struct dbg_bus_mem_addr {
- __le32 lo;
- __le32 hi;
-};
-
-/*
- * Debug Bus PCI buffer data
- */
-struct dbg_bus_pci_buf_data {
- struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */;
- struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */;
- __le32 size /* PCI buffer size in bytes */;
-};
-
-/*
- * Debug Bus Storm EID range filter params
- */
-struct dbg_bus_storm_eid_range_params {
- u8 min /* Minimal event ID to filter on */;
- u8 max /* Maximal event ID to filter on */;
-};
-
-/*
- * Debug Bus Storm EID mask filter params
- */
-struct dbg_bus_storm_eid_mask_params {
- u8 val /* Event ID value */;
- u8 mask /* Event ID mask. 1s in the mask = don't care bits. */;
-};
-
-/*
- * Debug Bus Storm EID filter params
- */
-union dbg_bus_storm_eid_params {
- struct dbg_bus_storm_eid_range_params range
- /* EID range filter params */;
- struct dbg_bus_storm_eid_mask_params mask /* EID mask filter params */;
-};
-
-/*
- * Debug Bus Storm data
- */
-struct dbg_bus_storm_data {
- u8 fast_enabled;
- u8 fast_mode
- /* Fast debug Storm mode, valid only if fast_enabled is set */;
- u8 slow_enabled;
- u8 slow_mode
- /* Slow debug Storm mode, valid only if slow_enabled is set */;
- u8 hw_id /* HW ID associated with the Storm */;
- u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
- u8 eid_range_not_mask;
- u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
- union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
- __le32 cid /* CID to filter on. Valid only if cid_filter_en is set. */;
-};
-
-/*
- * Debug Bus data
- */
-struct dbg_bus_data {
- __le32 app_version /* The tools version number of the application */;
- u8 state /* The current debug bus state */;
- u8 hw_dwords /* HW dwords per cycle */;
- u8 next_hw_id /* Next HW ID to be associated with an input */;
- u8 num_enabled_blocks /* Number of blocks enabled for recording */;
- u8 num_enabled_storms /* Number of Storms enabled for recording */;
- u8 target /* Output target */;
- u8 next_trigger_state /* ID of next trigger state to be added */;
- u8 next_constraint_id
- /* ID of next filter/trigger constraint to be added */;
- u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
- u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
- u8 timestamp_input_en
- /* Indicates if timestamp recording is enabled (0/1) */;
- u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
- u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */
- ;
- u8 adding_filter;
- u8 filter_pre_trigger;
- u8 filter_post_trigger;
- u8 unify_inputs;
- u8 rcv_from_other_engine;
- struct dbg_bus_pci_buf_data pci_buf;
- __le16 reserved;
- struct dbg_bus_block_data blocks[80] /* Debug Bus data for each block */;
- struct dbg_bus_storm_data storms[6] /* Debug Bus data for each Storm */;
-};
-
-/*
- * Debug bus filter types
- */
-enum dbg_bus_filter_types {
- DBG_BUS_FILTER_TYPE_OFF /* filter always off */,
- DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */,
- DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */,
- DBG_BUS_FILTER_TYPE_ON /* filter always on */,
- MAX_DBG_BUS_FILTER_TYPES
-};
-
-/*
- * Debug bus frame modes
- */
-enum dbg_bus_frame_modes {
- DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */,
- DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */,
- DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */,
- MAX_DBG_BUS_FRAME_MODES
-};
-
-/*
- * Debug bus input types
- */
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
-/*
- * Debug bus other engine mode
- */
-enum dbg_bus_other_engine_modes {
- DBG_BUS_OTHER_ENGINE_MODE_NONE,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
- DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
- DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX,
- MAX_DBG_BUS_OTHER_ENGINE_MODES
-};
-
-/*
- * Debug bus post-trigger recording types
- */
-enum dbg_bus_post_trigger_types {
- DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */,
- DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */,
- MAX_DBG_BUS_POST_TRIGGER_TYPES
-};
-
-/*
- * Debug bus pre-trigger recording types
- */
-enum dbg_bus_pre_trigger_types {
- DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */,
- DBG_BUS_PRE_TRIGGER_NUM_CHUNKS
- /* start recording some chunks before trigger */,
- DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */,
- MAX_DBG_BUS_PRE_TRIGGER_TYPES
-};
-
-/*
- * Debug bus SEMI frame modes
- */
-enum dbg_bus_semi_frame_modes {
- DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST =
- 0 /* 0 slow dwords, 4 fast dwords */,
- DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST =
- 3 /* 4 slow dwords, 0 fast dwords */,
- MAX_DBG_BUS_SEMI_FRAME_MODES
-};
-
-/*
- * Debug bus states
- */
-enum dbg_bus_states {
- DBG_BUS_STATE_BEFORE_RECORD /* before the debug bus recording starts */,
- DBG_BUS_STATE_DURING_RECORD /* during debug bus recording */,
- DBG_BUS_STATE_AFTER_RECORD /* after debug bus recording */,
- MAX_DBG_BUS_STATES
-};
-
-/*
- * Debug Bus Storm modes
- */
-enum dbg_bus_storm_modes {
- DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */,
- DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */,
- DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */,
- DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */,
- DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */,
- DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */,
- DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */,
- DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */,
- DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */,
- MAX_DBG_BUS_STORM_MODES
-};
-
-/*
- * Debug bus target IDs
- */
-enum dbg_bus_targets {
- DBG_BUS_TARGET_ID_INT_BUF
- /* records debug bus to DBG block internal buffer */,
- DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */,
- DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */,
- MAX_DBG_BUS_TARGETS
-};
-
-/*
- * GRC Dump data
- */
-struct dbg_grc_data {
- u8 is_updated /* Indicates if the GRC Dump data is updated (0/1) */;
- u8 chip_id /* Chip ID */;
- u8 chip_mask /* Chip mask */;
- u8 reserved;
- __le32 max_dump_dwords /* Max GRC Dump size in dwords */;
- __le32 param_val[40];
- u8 param_set_by_user[40];
-};
-
-/*
- * Debug GRC params
- */
-enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */,
- DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */,
- DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */,
- DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */,
- DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */,
- DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */,
- DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */,
- DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */,
- DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */,
- DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */,
- DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */,
- DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */,
- DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */,
- DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */,
- DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */,
- DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */,
- DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */,
- DBG_GRC_PARAM_RESERVED /* reserved */,
- DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */,
- DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */,
- DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */,
- DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */,
- DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */,
- DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */,
- DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */,
- DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */,
- DBG_GRC_PARAM_DUMP_DMAE /* dump DMAE memories (0/1) */,
- DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */,
- DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */,
- DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */,
- DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */,
- DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */,
- DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */,
- DBG_GRC_PARAM_EXCLUDE_ALL
- /* preset: exclude all memories from dump (1 only) */,
- DBG_GRC_PARAM_CRASH
- /* preset: include memories for crash dump (1 only) */,
- DBG_GRC_PARAM_PARITY_SAFE
- /* perform dump only if MFW is responding (0/1) */,
- DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
- MAX_DBG_GRC_PARAMS
-};
-
-/*
- * Debug reset registers
- */
-enum dbg_reset_regs {
- DBG_RESET_REG_MISCS_PL_UA,
- DBG_RESET_REG_MISCS_PL_HV,
- DBG_RESET_REG_MISC_PL_UA,
- DBG_RESET_REG_MISC_PL_HV,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_1,
- DBG_RESET_REG_MISC_PL_PDA_VMAIN_2,
- DBG_RESET_REG_MISC_PL_PDA_VAUX,
- MAX_DBG_RESET_REGS
-};
-
-/*
- * @DPDK Debug status codes
- */
-enum dbg_status {
- DBG_STATUS_OK,
- DBG_STATUS_APP_VERSION_NOT_SET,
- DBG_STATUS_UNSUPPORTED_APP_VERSION,
- DBG_STATUS_DBG_BLOCK_NOT_RESET,
- DBG_STATUS_INVALID_ARGS,
- DBG_STATUS_OUTPUT_ALREADY_SET,
- DBG_STATUS_INVALID_PCI_BUF_SIZE,
- DBG_STATUS_PCI_BUF_ALLOC_FAILED,
- DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_TOO_MANY_INPUTS,
- DBG_STATUS_INPUT_OVERLAP,
- DBG_STATUS_HW_ONLY_RECORDING,
- DBG_STATUS_STORM_ALREADY_ENABLED,
- DBG_STATUS_STORM_NOT_ENABLED,
- DBG_STATUS_BLOCK_ALREADY_ENABLED,
- DBG_STATUS_BLOCK_NOT_ENABLED,
- DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_64B,
- DBG_STATUS_FILTER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_NOT_ENABLED,
- DBG_STATUS_CANT_ADD_CONSTRAINT,
- DBG_STATUS_TOO_MANY_TRIGGER_STATES,
- DBG_STATUS_TOO_MANY_CONSTRAINTS,
- DBG_STATUS_RECORDING_NOT_STARTED,
- DBG_STATUS_NO_DATA_TRIGGERED,
- DBG_STATUS_NO_DATA_RECORDED,
- DBG_STATUS_DUMP_BUF_TOO_SMALL,
- DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
- DBG_STATUS_UNKNOWN_CHIP,
- DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
- DBG_STATUS_BLOCK_IN_RESET,
- DBG_STATUS_INVALID_TRACE_SIGNATURE,
- DBG_STATUS_INVALID_NVRAM_BUNDLE,
- DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
- DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
- DBG_STATUS_NVRAM_READ_FAILED,
- DBG_STATUS_IDLE_CHK_PARSE_FAILED,
- DBG_STATUS_MCP_TRACE_BAD_DATA,
- DBG_STATUS_MCP_TRACE_NO_META,
- DBG_STATUS_MCP_COULD_NOT_HALT,
- DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_DMAE_FAILED,
- DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
- DBG_STATUS_IGU_FIFO_BAD_DATA,
- DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
- DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
- DBG_STATUS_REG_FIFO_BAD_DATA,
- DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
- MAX_DBG_STATUS
-};
-
-/*
- * Debug Storms IDs
- */
-enum dbg_storms {
- DBG_TSTORM_ID,
- DBG_MSTORM_ID,
- DBG_USTORM_ID,
- DBG_XSTORM_ID,
- DBG_YSTORM_ID,
- DBG_PSTORM_ID,
- MAX_DBG_STORMS
-};
-
-/*
- * Idle Check data
- */
-struct idle_chk_data {
- __le32 buf_size /* Idle check buffer size in dwords */;
- u8 buf_size_set
- /* Indicates if the idle check buffer size was set (0/1) */;
- u8 reserved1;
- __le16 reserved2;
-};
-
-/*
- * MCP Trace data
- */
-struct mcp_trace_data {
- __le32 buf_size /* MCP Trace buffer size in dwords */;
- u8 buf_size_set
- /* Indicates if the MCP Trace buffer size was set (0/1) */;
- u8 reserved1;
- __le16 reserved2;
-};
-
-/*
- * Debug Tools data (per HW function)
- */
-struct dbg_tools_data {
- struct dbg_grc_data grc /* GRC Dump data */;
- struct dbg_bus_data bus /* Debug Bus data */;
- struct idle_chk_data idle_chk /* Idle Check data */;
- struct mcp_trace_data mcp_trace /* MCP Trace data */;
- u8 block_in_reset[80] /* Indicates if a block is in reset state (0/1) */
- ;
- u8 chip_id /* Chip ID (from enum chip_ids) */;
- u8 chip_mask
- /* Chip mask = bit index chip_id is set, the rest are cleared */;
- u8 initialized /* Indicates if the data was initialized */;
- u8 reset_state_updated
- /* Indicates if blocks reset state is updated (0/1) */;
-};
-
-/*
- * BRB RAM init requirements
- */
-struct init_brb_ram_req {
- __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
- __le32 headroom_per_tc /* headroom size per TC, in bytes */;
- __le32 min_pkt_size /* min packet size, in bytes */;
- __le32 max_ports_per_engine /* max number of ports per engine */;
- u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
-};
-
-/*
- * ETS per-TC init requirements
- */
-struct init_ets_tc_req {
- u8 use_sp;
- u8 use_wfq;
- __le16 weight /* An arbitration weight. Valid only if use_wfq is set. */
- ;
-};
-
-/*
- * ETS init requirements
- */
-struct init_ets_req {
- __le32 mtu /* Max packet size (in bytes) */;
- struct init_ets_tc_req tc_req[NUM_OF_TCS]
- /* ETS initialization requirements per TC. */;
-};
-
-/*
- * NIG LB RL init requirements
- */
-struct init_nig_lb_rl_req {
- __le16 lb_mac_rate;
- __le16 lb_rate;
- __le32 mtu /* Max packet size (in bytes) */;
- __le16 tc_rate[NUM_OF_PHYS_TCS];
-};
-
-/*
- * NIG TC mapping for each priority
- */
-struct init_nig_pri_tc_map_entry {
- u8 tc_id /* the mapped TC ID */;
- u8 valid /* indicates if the mapping entry is valid */;
-};
-
-/*
- * NIG priority to TC map init requirements
- */
-struct init_nig_pri_tc_map_req {
- struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
-};
-
-/*
- * QM per-port init parameters
- */
-struct init_qm_port_params {
- u8 active /* Indicates if this port is active */;
- u8 num_active_phys_tcs /* number of physical TCs used by this port */;
- __le16 num_pbf_cmd_lines
- /* number of PBF command lines that can be used by this port */;
- __le16 num_btb_blocks
- /* number of BTB blocks that can be used by this port */;
- __le16 reserved;
-};
-
-/*
- * QM per-PQ init parameters
- */
-struct init_qm_pq_params {
- u8 vport_id /* VPORT ID */;
- u8 tc_id /* TC ID */;
- u8 wrr_group /* WRR group */;
- u8 reserved;
-};
-
-/*
- * QM per-vport init parameters
- */
-struct init_qm_vport_params {
- __le32 vport_rl;
- __le16 vport_wfq;
- __le16 first_tx_pq_id[NUM_OF_TCS]
- /* the first Tx PQ ID associated with this VPORT for each TC. */;
-};
-
-#endif /* __ECORE_HSI_TOOLS__ */
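
The struct iro entries defined above (a base offset, up to three multipliers and a size) describe how internal-RAM offsets are computed from runtime indices such as a queue or function ID. Below is a minimal stand-alone sketch of that computation; the base + index * multiplier scheme is inferred from the field names and comments, and all numbers are made up, so treat it as an illustration rather than the driver's actual IRO macros.

#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the IRO table entry; the multipliers scale
 * per-queue / per-function indices into a byte offset (an assumption
 * based on the field names, not a copy of the driver's IRO macros)
 */
struct iro {
	uint32_t base; /* RAM field offset */
	uint16_t m1;   /* multiplier for the first index */
	uint16_t m2;   /* multiplier for the second index */
	uint16_t m3;   /* multiplier for the third index */
	uint16_t size; /* RAM field size */
};

static uint32_t iro_offset(const struct iro *e, uint32_t i1, uint32_t i2, uint32_t i3)
{
	return e->base + i1 * e->m1 + i2 * e->m2 + i3 * e->m3;
}

int main(void)
{
	struct iro stat_entry = { .base = 0x1000, .m1 = 0x40, .m2 = 0, .m3 = 0, .size = 0x40 };

	/* offset of the per-queue block for queue 5 under these made-up numbers */
	printf("offset=0x%x size=0x%x\n", iro_offset(&stat_entry, 5, 0, 0), stat_entry.size);
	return 0;
}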
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 5403b94b..7f4db0a0 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -23,27 +23,28 @@
#define ECORE_BAR_ACQUIRE_TIMEOUT 1000
/* Invalid values */
-#define ECORE_BAR_INVALID_OFFSET -1
+#define ECORE_BAR_INVALID_OFFSET (OSAL_CPU_TO_LE32(-1))
struct ecore_ptt {
osal_list_entry_t list_entry;
unsigned int idx;
struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
};
struct ecore_ptt_pool {
osal_list_t free_list;
- osal_spinlock_t lock;
+ osal_spinlock_t lock; /* ptt synchronized access */
struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
{
- struct ecore_ptt_pool *p_pool;
+ struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_pool));
int i;
- p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_ptt_pool));
if (!p_pool)
return ECORE_NOMEM;
@@ -52,6 +53,7 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
p_pool->ptts[i].idx = i;
p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
/* There are special PTT entries that are taken only by design.
	 * The rest are added to the list for general usage.
@@ -95,32 +97,36 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
/* Take the free PTT from the list */
for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
- if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
- break;
- OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
- OSAL_MSLEEP(1);
- }
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->p_ptt_pool->free_list,
+ struct ecore_ptt, list_entry);
+ OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
+ &p_hwfn->p_ptt_pool->free_list);
- /* We should not time-out, but it can happen... --> Lock isn't held */
- if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
- return OSAL_NULL;
- }
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
- p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
- struct ecore_ptt, list_entry);
- OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
- &p_hwfn->p_ptt_pool->free_list);
- OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+
+ return p_ptt;
+ }
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
+ OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
+ OSAL_MSLEEP(1);
+ }
- return p_ptt;
+ DP_NOTICE(p_hwfn, true,
+ "PTT acquire timeout - failed to allocate PTT\n");
+ return OSAL_NULL;
}
void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
/* This PTT should not be set to pretend if it is being released */
+ /* TODO - add some pretend sanity checks, to make sure pretend
+ * isn't set on this ptt
+ */
OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
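
The rewritten ecore_ptt_acquire() above holds the pool spinlock only while it checks and pops the free list, returns the entry from inside the retry loop, and sleeps 1 ms between attempts for up to ECORE_BAR_ACQUIRE_TIMEOUT tries. Below is a stand-alone sketch of the same bounded-retry acquire/release pattern, using pthreads instead of the OSAL layer; the names and sleep interval are illustrative only.

#include <pthread.h>
#include <stddef.h>
#include <unistd.h>

#define ACQUIRE_TIMEOUT 1000 /* retries, roughly 1 ms apart */

struct pool {
	pthread_mutex_t lock;
	void *free_list[16];
	int free_count;
};

/* Pop one entry, retrying for a bounded time; NULL on timeout. */
static void *pool_acquire(struct pool *p)
{
	for (int i = 0; i < ACQUIRE_TIMEOUT; i++) {
		pthread_mutex_lock(&p->lock);
		if (p->free_count > 0) {
			void *e = p->free_list[--p->free_count];

			pthread_mutex_unlock(&p->lock);
			return e; /* success path leaves with the lock released */
		}
		pthread_mutex_unlock(&p->lock);
		usleep(1000); /* back off before retrying */
	}
	return NULL; /* timed out; caller must handle the failure */
}

static void pool_release(struct pool *p, void *e)
{
	pthread_mutex_lock(&p->lock);
	p->free_list[p->free_count++] = e;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	static int entries[16];
	struct pool p = { .free_count = 0 };

	pthread_mutex_init(&p.lock, NULL);
	for (int i = 0; i < 16; i++)
		p.free_list[p.free_count++] = &entries[i];

	void *e = pool_acquire(&p);
	if (e)
		pool_release(&p, e);
	pthread_mutex_destroy(&p.lock);
	return 0;
}

Build with -pthread; the driver version additionally tags each entry with the owning hwfn, which the new cross-hwfn sanity check in ecore_set_ptt() relies on.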
@@ -130,7 +136,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
- return p_ptt->pxp.offset << 2;
+ return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
}
static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
@@ -161,11 +167,12 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
p_ptt->idx, new_hw_addr);
/* The HW is using DWORDS and the address is in Bytes */
- p_ptt->pxp.offset = new_hw_addr >> 2;
+ p_ptt->pxp.offset = OSAL_CPU_TO_LE32(new_hw_addr >> 2);
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
- OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
+ OFFSETOF(struct pxp_ptt_entry, offset),
+ OSAL_LE32_TO_CPU(p_ptt->pxp.offset));
}
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
@@ -176,6 +183,11 @@ static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
offset = hw_addr - win_hw_addr;
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn, true,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
/* Verify the address is within the window */
if (hw_addr < win_hw_addr ||
offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
@@ -198,11 +210,37 @@ struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}
+static bool ecore_is_reg_fifo_empty(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ bool is_empty = true;
+ u32 bar_addr;
+
+ if (!p_hwfn->p_dev->chk_reg_fifo)
+ goto out;
+
+ /* ecore_rd() cannot be used here since it calls this function */
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, GRC_REG_TRACE_FIFO_VALID_DATA);
+ is_empty = REG_RD(p_hwfn, bar_addr) == 0;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
+ OSAL_UDELAY(100);
+#endif
+
+out:
+ return is_empty;
+}
+
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
{
- u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ bool prev_fifo_err;
+ u32 bar_addr;
+
+ prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
REG_WR(p_hwfn, bar_addr, val);
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
@@ -212,12 +250,21 @@ void ecore_wr(struct ecore_hwfn *p_hwfn,
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
OSAL_UDELAY(100);
#endif
+
+ OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
+ "reg_fifo err was caused by a call to ecore_wr(0x%x, 0x%x)\n",
+ hw_addr, val);
}
u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
{
- u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
- u32 val = REG_RD(p_hwfn, bar_addr);
+ bool prev_fifo_err;
+ u32 bar_addr, val;
+
+ prev_fifo_err = !ecore_is_reg_fifo_empty(p_hwfn, p_ptt);
+
+ bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
+ val = REG_RD(p_hwfn, bar_addr);
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
@@ -228,6 +275,10 @@ u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
OSAL_UDELAY(100);
#endif
+ OSAL_WARN(!prev_fifo_err && !ecore_is_reg_fifo_empty(p_hwfn, p_ptt),
+ "reg_fifo error was caused by a call to ecore_rd(0x%x)\n",
+ hw_addr);
+
return val;
}
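
Both ecore_wr() and ecore_rd() now sample the reg_fifo state before and after the access, so the warning is raised only when an error first appears during that particular read or write. The sketch below shows the same before/after attribution idea stripped of hardware access; the fifo predicate is a stand-in for reading GRC_REG_TRACE_FIFO_VALID_DATA, not the real register read.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_fifo_valid; /* stand-in for the GRC trace-fifo valid count */

static bool reg_fifo_empty(void)
{
	return fake_fifo_valid == 0;
}

static void checked_write(uint32_t addr, uint32_t val)
{
	/* remember whether an error was already pending before this access */
	bool prev_err = !reg_fifo_empty();

	/* ... the actual register write would go here ... */

	/* only blame this write if the error appeared during it */
	if (!prev_err && !reg_fifo_empty())
		fprintf(stderr, "reg_fifo error caused by write(0x%x, 0x%x)\n", addr, val);
}

int main(void)
{
	checked_write(0x1234, 0xdeadbeef);
	return 0;
}

In the driver the check is skipped unless p_dev->chk_reg_fifo is set, so it costs nothing on production paths.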
@@ -292,60 +343,59 @@ void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 fid)
{
- void *p_pretend;
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
- /* Every pretend undos prev pretends, including previous port pretend */
+/* Every pretend undoes previous pretends, including previous port pretend */
+
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
- p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
- p_pretend = &p_ptt->pxp.pretend;
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
- OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
}
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 port_id)
{
- void *p_pretend;
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
- p_ptt->pxp.pretend.control = control;
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
- p_pretend = &p_ptt->pxp.pretend;
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
- OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
}
void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- void *p_pretend;
u16 control = 0;
SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
- p_ptt->pxp.pretend.control = control;
- p_pretend = &p_ptt->pxp.pretend;
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+
REG_WR(p_hwfn,
ecore_ptt_config_addr(p_ptt) +
- OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
}
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
@@ -442,15 +492,16 @@ static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
{
OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
- return DMAE_REG_GO_C0 + idx * 4;
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
}
static enum _ecore_status_t
ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
- enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
/* verify address is not OSAL_NULL */
if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
@@ -459,13 +510,14 @@ ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
"source or destination address 0 idx_cmd=%d\n"
"opcode = [0x%08x,0x%04x] len=0x%x"
" src=0x%x:%x dst=0x%x:%x\n",
- idx_cmd, (u32)p_command->opcode,
- (u16)p_command->opcode_b,
- (int)p_command->length,
- (int)p_command->src_addr_hi,
- (int)p_command->src_addr_lo,
- (int)p_command->dst_addr_hi,
- (int)p_command->dst_addr_lo);
+ idx_cmd,
+ OSAL_LE32_TO_CPU(p_command->opcode),
+ OSAL_LE16_TO_CPU(p_command->opcode_b),
+ OSAL_LE16_TO_CPU(p_command->length_dw),
+ OSAL_LE32_TO_CPU(p_command->src_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->src_addr_lo),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
return ECORE_INVAL;
}
@@ -473,12 +525,14 @@ ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
"len=0x%x src=0x%x:%x dst=0x%x:%x\n",
- idx_cmd, (u32)p_command->opcode,
- (u16)p_command->opcode_b,
- (int)p_command->length,
- (int)p_command->src_addr_hi,
- (int)p_command->src_addr_lo,
- (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);
+ idx_cmd,
+ OSAL_LE32_TO_CPU(p_command->opcode),
+ OSAL_LE16_TO_CPU(p_command->opcode_b),
+ OSAL_LE16_TO_CPU(p_command->length_dw),
+ OSAL_LE32_TO_CPU(p_command->src_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->src_addr_lo),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_hi),
+ OSAL_LE32_TO_CPU(p_command->dst_addr_lo));
/* Copy the command to DMAE - need to do it before every call
* for source/dest address no reset.
@@ -514,8 +568,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
if (*p_comp == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `p_completion_word'\n");
- ecore_dmae_info_free(p_hwfn);
- return ECORE_NOMEM;
+ goto err;
}
p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
@@ -524,8 +577,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
if (*p_cmd == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct dmae_cmd'\n");
- ecore_dmae_info_free(p_hwfn);
- return ECORE_NOMEM;
+ goto err;
}
p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
@@ -534,14 +586,15 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
if (*p_buff == OSAL_NULL) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `intermediate_buffer'\n");
- ecore_dmae_info_free(p_hwfn);
- return ECORE_NOMEM;
+ goto err;
}
- /* DMAE_E4_TODO : Need to change this to reflect proper channel */
p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
return ECORE_SUCCESS;
+err:
+ ecore_dmae_info_free(p_hwfn);
+ return ECORE_NOMEM;
}
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
@@ -580,8 +633,8 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 wait_cnt_limit = 10000, wait_cnt = 0;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
#ifndef ASIC_ONLY
u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
@@ -598,9 +651,6 @@ static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
*/
OSAL_BARRIER(p_hwfn->p_dev);
while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
- /* DMAE_E4_TODO : using OSAL_MSLEEP instead of mm_wait since mm
- * functions are getting depriciated. Need to review for future.
- */
OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
if (++wait_cnt > wait_cnt_limit) {
DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
@@ -629,7 +679,7 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 src_addr,
u64 dst_addr,
- u8 src_type, u8 dst_type, u32 length)
+ u8 src_type, u8 dst_type, u32 length_dw)
{
dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
@@ -638,16 +688,16 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
switch (src_type) {
case ECORE_DMAE_ADDRESS_GRC:
case ECORE_DMAE_ADDRESS_HOST_PHYS:
- cmd->src_addr_hi = DMA_HI(src_addr);
- cmd->src_addr_lo = DMA_LO(src_addr);
+ cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(src_addr));
+ cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(src_addr));
break;
/* for virt source addresses we use the intermediate buffer. */
case ECORE_DMAE_ADDRESS_HOST_VIRT:
- cmd->src_addr_hi = DMA_HI(phys);
- cmd->src_addr_lo = DMA_LO(phys);
+ cmd->src_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->src_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
(void *)(osal_uintptr_t)src_addr,
- length * sizeof(u32));
+ length_dw * sizeof(u32));
break;
default:
return ECORE_INVAL;
@@ -656,26 +706,26 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
switch (dst_type) {
case ECORE_DMAE_ADDRESS_GRC:
case ECORE_DMAE_ADDRESS_HOST_PHYS:
- cmd->dst_addr_hi = DMA_HI(dst_addr);
- cmd->dst_addr_lo = DMA_LO(dst_addr);
+ cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(dst_addr));
+ cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(dst_addr));
break;
/* for virt destination address we use the intermediate buff. */
case ECORE_DMAE_ADDRESS_HOST_VIRT:
- cmd->dst_addr_hi = DMA_HI(phys);
- cmd->dst_addr_lo = DMA_LO(phys);
+ cmd->dst_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->dst_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
break;
default:
return ECORE_INVAL;
}
- cmd->length = (u16)length;
+ cmd->length_dw = OSAL_CPU_TO_LE16((u16)length_dw);
if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
OSAL_DMA_SYNC(p_hwfn->p_dev,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
- length * sizeof(u32), false);
+ length_dw * sizeof(u32), false);
ecore_dmae_post_command(p_hwfn, p_ptt);
@@ -687,21 +737,21 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
OSAL_DMA_SYNC(p_hwfn->p_dev,
(void *)HILO_U64(cmd->src_addr_hi,
cmd->src_addr_lo),
- length * sizeof(u32), true);
+ length_dw * sizeof(u32), true);
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, ECORE_MSG_HW,
"ecore_dmae_host2grc: Wait Failed. source_addr"
" 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n",
(unsigned long)src_addr, (unsigned long)dst_addr,
- length);
+ length_dw);
return ecore_status;
}
if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
&p_hwfn->dmae_info.p_intermediate_buffer[0],
- length * sizeof(u32));
+ length_dw * sizeof(u32));
return ECORE_SUCCESS;
}
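
Most of the churn in the DMAE code above converts command fields to explicit little-endian storage (OSAL_CPU_TO_LE32/LE16) and renames length to length_dw, so the descriptor layout is correct on big-endian hosts too. Below is a small, self-contained illustration of filling a hi/lo split descriptor in little-endian form; the cpu_to_le helpers are hypothetical stand-ins for the OSAL macros, not the driver's own.

#include <stdint.h>
#include <stdio.h>

/* byte-swap helpers; real code would use the platform's cpu_to_le32/16 */
static uint32_t cpu_to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v; /* little-endian host: no swap needed */
#endif
}

static uint16_t cpu_to_le16(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap16(v);
#else
	return v;
#endif
}

/* toy descriptor with the same hi/lo address split as struct dmae_cmd */
struct toy_cmd {
	uint32_t src_addr_lo;
	uint32_t src_addr_hi;
	uint16_t length_dw;
};

int main(void)
{
	uint64_t dma_addr = 0x0000001234567890ULL;
	struct toy_cmd cmd;

	cmd.src_addr_lo = cpu_to_le32((uint32_t)dma_addr);
	cmd.src_addr_hi = cpu_to_le32((uint32_t)(dma_addr >> 32));
	cmd.length_dw = cpu_to_le16(64); /* length in dwords */

	printf("lo=0x%08x hi=0x%08x len=%u\n",
	       cmd.src_addr_lo, cmd.src_addr_hi, cmd.length_dw);
	return 0;
}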
@@ -719,18 +769,18 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
- enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u64 src_addr_split = 0, dst_addr_split = 0;
u16 length_limit = DMAE_MAX_RW_SIZE;
+ enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
- cmd->comp_addr_lo = DMA_LO(phys);
- cmd->comp_addr_hi = DMA_HI(phys);
- cmd->comp_val = DMAE_COMPLETION_VAL;
+ cmd->comp_addr_lo = OSAL_CPU_TO_LE32(DMA_LO(phys));
+ cmd->comp_addr_hi = OSAL_CPU_TO_LE32(DMA_HI(phys));
+ cmd->comp_val = OSAL_CPU_TO_LE32(DMAE_COMPLETION_VAL);
/* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
cnt_split = size_in_dwords / length_limit;
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 89499447..0750b2ed 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -105,7 +105,8 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
*
* @return u32
*/
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
@@ -115,7 +116,7 @@ u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
*
* @return u32
*/
-u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
+u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
@@ -124,8 +125,9 @@ u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
* @param new_hw_addr
* @param p_ptt
*/
-void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 new_hw_addr);
+void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 new_hw_addr);
/**
* @brief ecore_get_reserved_ptt - Get a specific reserved PTT
@@ -135,8 +137,8 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
*
* @return struct ecore_ptt *
*/
-struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
- enum reserved_ptts ptt_idx);
+struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
/**
* @brief ecore_wr - Write value to BAR using the given ptt
@@ -146,8 +148,10 @@ struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
* @param val
* @param hw_addr
*/
-void ecore_wr(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 hw_addr, u32 val);
+void ecore_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
/**
* @brief ecore_rd - Read value from BAR using the given ptt
@@ -157,7 +161,9 @@ void ecore_wr(struct ecore_hwfn *p_hwfn,
* @param val
* @param hw_addr
*/
-u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
+u32 ecore_rd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr);
/**
* @brief ecore_memcpy_from - copy n bytes from BAR using the given
@@ -169,9 +175,11 @@ u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr);
* @param hw_addr
* @param n
*/
-void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- void *dest, u32 hw_addr, osal_size_t n);
+void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ osal_size_t n);
/**
* @brief ecore_memcpy_to - copy n bytes to BAR using the given
@@ -183,9 +191,11 @@ void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
* @param src
* @param n
*/
-void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 hw_addr, void *src, osal_size_t n);
+void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ osal_size_t n);
/**
* @brief ecore_fid_pretend - pretend to another function when
* accessing the ptt window. There is no way to unpretend
@@ -197,8 +207,9 @@ void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
* @param fid - fid field of pxp_pretend structure. Can contain
* either pf / vf, port/path fields are don't care.
*/
-void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 fid);
+void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 fid);
/**
* @brief ecore_port_pretend - pretend to another port when
@@ -208,8 +219,9 @@ void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
* @param p_ptt
* @param port_id - the port to pretend to
*/
-void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 port_id);
+void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 port_id);
/**
* @brief ecore_port_unpretend - cancel any previously set port
@@ -218,7 +230,8 @@ void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
*/
-void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_vfid_to_concrete - build a concrete FID for a
@@ -235,7 +248,7 @@ u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
* which is part of p_hwfn.
* @param p_hwfn
*/
-enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dmae_info_free - Free the dmae_info structure
@@ -243,10 +256,14 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
*
* @param p_hwfn
*/
-void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
+void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
union ecore_qm_pq_params {
struct {
+ u8 q_idx;
+ } iscsi;
+
+ struct {
u8 tc;
} core;
@@ -255,10 +272,20 @@ union ecore_qm_pq_params {
u8 vf_id;
u8 tc;
} eth;
+
+ struct {
+ u8 dcqcn;
+ u8 qpid; /* roce relative */
+ } roce;
+
+ struct {
+ u8 qidx;
+ } iwarp;
};
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto, union ecore_qm_pq_params *params);
+u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
+ enum protocol_type proto,
+ union ecore_qm_pq_params *params);
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);
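
The widened union ecore_qm_pq_params above lets each protocol (core, eth, and now iscsi, roce and iwarp) pass its own queue parameters through a single ecore_get_qm_pq() argument. The following is a toy, stand-alone re-creation of that per-protocol union pattern; the field names and selection logic are invented for illustration and are not taken from the driver.

#include <stdint.h>
#include <stdio.h>

enum proto { PROTO_CORE, PROTO_ETH, PROTO_ROCE };

union pq_params {
	struct { uint8_t tc; } core;
	struct { uint8_t is_vf, vf_id, tc; } eth;
	struct { uint8_t dcqcn, qpid; } roce;
};

/* toy selector: each protocol interprets only its own member */
static uint16_t get_pq(enum proto p, const union pq_params *prm)
{
	switch (p) {
	case PROTO_CORE: return prm->core.tc;
	case PROTO_ETH:  return (uint16_t)(prm->eth.vf_id * 8 + prm->eth.tc);
	case PROTO_ROCE: return prm->roce.qpid;
	}
	return 0;
}

int main(void)
{
	union pq_params prm = { .eth = { .is_vf = 1, .vf_id = 3, .tc = 2 } };

	printf("pq=%u\n", get_pq(PROTO_ETH, &prm));
	return 0;
}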
diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h
index fa518cec..4456af43 100644
--- a/drivers/net/qede/base/ecore_hw_defs.h
+++ b/drivers/net/qede/base/ecore_hw_defs.h
@@ -10,19 +10,30 @@
#define _ECORE_IGU_DEF_H_
 /* Fields of IGU PF CONFIGURATION REGISTER */
-#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
-#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
-#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
-#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
-#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
-#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+/* function enable */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* INT enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2)
+/* attention enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3)
+/* single ISR mode enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* simd all ones mode */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5)
 /* Fields of IGU VF CONFIGURATION REGISTER */
-#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
-#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
-#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
-#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
-#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
+/* function enable */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0)
+/* MSI/MSIX enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1)
+/* single ISR mode enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_MASK (0xF)
+/* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5
/* Igu control commands
*/
@@ -36,13 +47,13 @@ enum igu_ctrl_cmd {
*/
struct igu_ctrl_reg {
u32 ctrl_data;
-#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
#define IGU_CTRL_REG_FID_SHIFT 0
-#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_RESERVED_MASK 0x1
#define IGU_CTRL_REG_RESERVED_SHIFT 28
-#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
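
The igu_ctrl_reg word above is assembled from the FID, PXP address and type fields using the usual MASK/SHIFT convention. Below is a minimal sketch of composing such a control word; which type value means a read or a write command depends on enum igu_ctrl_cmd, which is only partially visible here, so the 1 used below is just an assumed example.

#include <stdint.h>
#include <stdio.h>

#define IGU_CTRL_REG_FID_MASK       0xFFFF
#define IGU_CTRL_REG_FID_SHIFT      0
#define IGU_CTRL_REG_PXP_ADDR_MASK  0xFFF
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_TYPE_MASK      0x1
#define IGU_CTRL_REG_TYPE_SHIFT     31

int main(void)
{
	uint32_t ctrl = 0;
	uint16_t opaque_fid = 0x0042;
	uint32_t cmd_addr = 0x123; /* 12-bit command address */

	ctrl |= ((uint32_t)opaque_fid & IGU_CTRL_REG_FID_MASK) << IGU_CTRL_REG_FID_SHIFT;
	ctrl |= (cmd_addr & IGU_CTRL_REG_PXP_ADDR_MASK) << IGU_CTRL_REG_PXP_ADDR_SHIFT;
	ctrl |= (1u & IGU_CTRL_REG_TYPE_MASK) << IGU_CTRL_REG_TYPE_SHIFT; /* assumed command type */

	printf("igu ctrl_data = 0x%08x\n", ctrl);
	return 0;
}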
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 5324e052..e83eeb81 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -12,11 +12,12 @@
#include "reg_addr.h"
#include "ecore_rt_defs.h"
#include "ecore_hsi_common.h"
-#include "ecore_hsi_tools.h"
+#include "ecore_hsi_init_func.h"
+#include "ecore_hsi_eth.h"
+#include "ecore_hsi_init_tool.h"
+#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
-
-/* @DPDK CmInterfaceEnum */
-enum cm_interface_enum {
+enum CmInterfaceEnum {
MCM_SEC,
MCM_PRI,
UCM_SEC,
@@ -50,17 +51,23 @@ enum cm_interface_enum {
#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_INC_VAL(rate) \
-OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD * 1.01) / 8), 1)
#define QM_RL_MAX_INC_VAL 43750000
+/* RL increment value - the factor of 1.01 was added after seeing only
+ * 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
+ * In this scenario the PF RL was reducing the line rate to 99% although
+ * the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is
+ * unknown at this point.
+ */
+/* rate in mbps */
+#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
+ QM_RL_PERIOD * 101) / (8 * 100)), 1)
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-#define EAGLE_WORKAROUND_TC 7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 /* eagle workaround CmdQ */
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
@@ -72,8 +79,8 @@ voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS /* headroom per-port */
-#define BTB_EAGLE_WORKAROUND_BLOCKS 4 /* eagle workaround blocks */
+/* headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
/* QM stop command constants */
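
The new QM_RL_INC_VAL above replaces the floating-point 1.01 factor with pure integer arithmetic: rate * QM_RL_PERIOD * 1.01 / 8 becomes (rate * QM_RL_PERIOD * 101) / (8 * 100). A quick stand-alone re-derivation for a 25 Gbps port (25000 Mbps) shows the rounding it produces; this is a sketch of the same formula, not the driver macro itself.

#include <stdint.h>
#include <stdio.h>

#define QM_RL_PERIOD 5 /* as defined above */

/* integer form of rate * period * 1.01 / 8, rate in Mbps */
static uint32_t qm_rl_inc_val(uint32_t rate)
{
	uint32_t inc = (uint32_t)(((uint64_t)(rate ? rate : 1000000) *
				   QM_RL_PERIOD * 101) / (8 * 100));
	return inc ? inc : 1;
}

int main(void)
{
	/* 25000 * 5 * 101 / 800 = 15781; the 1.01 factor adds ~156 over the plain 15625 */
	printf("inc(25G) = %u\n", qm_rl_inc_val(25000));
	return 0;
}

As in the macro, a zero rate falls back to 1000000 Mbps and the result is clamped to at least 1.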
@@ -169,6 +176,9 @@ static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
+ /* In A0 - Limit the size of pbf queue so that only 511 commands
+ * with the minimum size of 4 (FCoE minimum size)
+ */
bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
if (is_bb_a0)
cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
@@ -187,44 +197,38 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id;
- bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
+ u8 tc, voq, port_id, num_tcs_in_port;
/* clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
u16 phys_lines, phys_lines_per_tc;
+ /* find #lines to divide between active physical TCs */
phys_lines =
port_params[port_id].num_pbf_cmd_lines -
PBF_CMDQ_PURE_LB_LINES;
- if (eagle_workaround)
- phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES;
/* find #lines per active physical TC */
- phys_lines_per_tc =
- phys_lines /
- port_params[port_id].num_active_phys_tcs;
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* init registers per active TC */
- for (tc = 0;
- tc < port_params[port_id].num_active_phys_tcs;
- tc++) {
- voq =
- PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
- phys_lines_per_tc);
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn,
+ voq, phys_lines_per_tc);
+ }
}
/* init registers for pure LB TC */
ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
PBF_CMDQ_PURE_LB_LINES);
- /* init registers for eagle workaround */
- if (eagle_workaround) {
- voq =
- PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
- PBF_CMDQ_EAGLE_WORKAROUND_LINES);
- }
}
}
}
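
With this change the port parameters carry an active_phys_tcs bitmask instead of a num_active_phys_tcs count, and ecore_cmdq_lines_rt_init() counts the set bits before dividing the PBF command lines evenly among the active TCs. The counting-and-dividing step in isolation, with made-up numbers, looks like this:

#include <stdint.h>
#include <stdio.h>

#define NUM_OF_PHYS_TCS 8

int main(void)
{
	uint8_t active_phys_tcs = 0x15; /* TCs 0, 2 and 4 active */
	uint16_t phys_lines = 3300;     /* lines left after the pure-LB reservation */
	unsigned int num_tcs_in_port = 0;

	/* count active TCs the same way the init code does */
	for (int tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
		if ((active_phys_tcs >> tc) & 0x1)
			num_tcs_in_port++;

	printf("active TCs = %u, lines per TC = %u\n",
	       num_tcs_in_port, phys_lines / num_tcs_in_port);
	return 0;
}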
@@ -255,20 +259,24 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
/* subtract headroom blocks */
usable_blocks =
port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- if (eagle_workaround)
- usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS;
+/* find blocks per physical TC. use factor to avoid floating arithmetic */
+
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
pure_lb_blocks =
(usable_blocks * BTB_PURE_LB_FACTOR) /
- (port_params[port_id].num_active_phys_tcs *
+ (num_tcs_in_port *
BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
pure_lb_blocks =
OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
@@ -276,32 +284,24 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
phys_blocks =
(usable_blocks -
pure_lb_blocks) /
- port_params[port_id].num_active_phys_tcs;
+ num_tcs_in_port;
/* init physical TCs */
for (tc = 0;
- tc < port_params[port_id].num_active_phys_tcs;
+ tc < NUM_OF_PHYS_TCS;
tc++) {
- voq =
- PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn,
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
+ }
}
/* init pure LB TC */
STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ
- (port_id)),
- pure_lb_blocks);
- /* init eagle workaround */
- if (eagle_workaround) {
- voq =
- PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- BTB_EAGLE_WORKAROUND_BLOCKS);
- }
+ PBF_BTB_GUARANTEED_RT_OFFSET(
+ LB_VOQ(port_id)), pure_lb_blocks);
}
}
}
@@ -350,6 +350,10 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq =
VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
bool is_vf_pq = (i >= num_pf_pqs);
+ /* added to avoid compilation warning */
+ u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ bool rl_valid = pq_params[i].rl_valid &&
+ pq_params[i].vport_id < max_qm_global_rls;
/* update first Tx PQ of VPORT/TC */
u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
u16 first_tx_pq_id =
@@ -366,14 +370,19 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
(voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
+ /* check RL ID */
+ if (pq_params[i].rl_valid && pq_params[i].vport_id >=
+ max_qm_global_rls)
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter config");
/* fill PQ map entry */
OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- is_vf_pq ? 1 : 0);
+ rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- is_vf_pq ? pq_params[i].vport_id : 0);
+ rl_valid ? pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
pq_params[i].wrr_group);
@@ -385,6 +394,9 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
mem_addr_4kb);
/* check if VF PQ */
if (is_vf_pq) {
+ /* if PQ is associated with a VF, add indication to PQ
+ * VF mask
+ */
tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
(1 << (pq_id % tx_pq_vf_mask_width));
mem_addr_4kb += vport_pq_mem_4kb;
@@ -396,10 +408,13 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
for (i = 0; i < num_tx_pq_vf_masks; i++) {
if (tx_pq_vf_mask[i]) {
if (is_bb_a0) {
+ /* A0-only: perform read-modify-write
+ *(fixed in B0)
+ */
u32 curr_mask =
is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
QM_REG_MAXPQSIZETXSEL_0
- + i * 4);
+ + i * 4);
STORE_RT_REG(p_hwfn,
QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
i, curr_mask | tx_pq_vf_mask[i]);
@@ -419,6 +434,8 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u32 num_tids, u32 base_mem_addr_4kb)
{
u16 i, pq_id;
+/* a single other PQ grp is used in each PF, where PQ group i is used in PF i */
+
u16 pq_group = pf_id;
u32 pq_size = num_pf_cids + num_tids;
u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
@@ -437,7 +454,7 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
mem_addr_4kb += pq_mem_4kb;
}
}
-
+/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
@@ -461,15 +478,14 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq =
VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
-
-/* Prepare PF RL runtime init values for the specified PF. Return -1 on err */
+/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -478,13 +494,15 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
- QM_RL_CRD_REG_SIGN_BIT);
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
}
-
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs. Return -1 on
+ * error.
+ */
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
@@ -500,14 +518,17 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
"Invalid VPORT WFQ weight config");
return -1;
}
+ /* each VPORT can have several VPORT PQ IDs for
+ * different TCs
+ */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
u16 vport_pq_id =
vport_params[i].first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPWEIGHT_RT_OFFSET
+ vport_pq_id, inc_val);
@@ -518,13 +539,20 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
return 0;
}
-/* Prepare VPORT RL runtime init values for specified VPORT. Ret -1 on error. */
+/* Prepare VPORT RL runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
/* go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
@@ -534,10 +562,10 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
return -1;
}
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
- QM_RL_CRD_REG_SIGN_BIT);
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT);
+ QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
@@ -555,7 +583,7 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
}
/* check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
"Timeout waiting for QM SDM cmd ready signal\n");
return false;
}
@@ -597,7 +625,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 port_id;
/* init AFullOprtnstcCrdMask */
u32 mask =
(QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
@@ -610,18 +637,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
(QM_OPPOR_PQ_EMPTY_DEF <<
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
- /* check eagle workaround */
- for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active &&
- port_params[port_id].num_active_phys_tcs >
- EAGLE_WORKAROUND_TC &&
- ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
- DP_NOTICE(p_hwfn, true,
- "Can't config 8 TCs with Eagle"
- " eng1 workaround");
- return -1;
- }
- }
/* enable/disable PF RL */
ecore_enable_pf_rl(p_hwfn, pf_rl_en);
/* enable/disable PF WFQ */
@@ -716,7 +731,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
return -1;
}
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
return 0;
}
@@ -745,14 +760,20 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
- u32 inc_val = QM_RL_INC_VAL(vport_rl);
+ u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+ if (vport_id >= max_qm_global_rls) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT ID for rate limiter configuration");
+ return -1;
+ }
+ inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration");
return -1;
}
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
- QM_RL_CRD_REG_SIGN_BIT);
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
return 0;
}
@@ -1098,6 +1119,8 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
BRB_HYST_BLOCKS);
+/* init pause/full thresholds per physical TC - for loopback traffic */
+
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1110,6 +1133,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
+/* init pause/full thresholds per physical TC - for main traffic */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1128,22 +1152,22 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
/*In MF should be called once per engine to set EtherType of OuterTag*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 eth_type)
+ struct ecore_ptt *p_ptt, u32 ethType)
{
/* update PRS register */
- STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
/* update NIG register */
- STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
/* update PBF register */
- STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
/*In MF should be called once per port to set EtherType of OuterTag*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 eth_type)
+ struct ecore_ptt *p_ptt, u32 ethType)
{
/* update DORQ register */
- STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
@@ -1155,7 +1179,7 @@ void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
/* update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
/* update NIG register */
- ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
/* update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
@@ -1172,7 +1196,7 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
}
/* update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -1201,7 +1225,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
}
/* update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
@@ -1252,7 +1276,7 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
- PRS_ETH_TUNN_FIC_FORMAT);
+ (u32)PRS_ETH_TUNN_FIC_FORMAT);
}
/* update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
@@ -1273,3 +1297,179 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
ip_geneve_enable ? 1 : 0);
}
+
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+	/* set RFS event ID to be awakened in Tstorm by PRS */
+ u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+}
+
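The two OR operations above pack an event ID and a CM header value into a single PRS register word. A minimal sketch of that packing, using assumed shift positions rather than the real PRS_REG_CM_HDR_GFT_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed placeholders; the real shifts come from the PRS register HSI */
#define EX_EVENT_ID_SHIFT 16
#define EX_CM_HDR_SHIFT   0

static uint32_t pack_gft_cm_hdr(uint32_t reg, uint32_t event_id, uint32_t cm_hdr)
{
	reg |= event_id << EX_EVENT_ID_SHIFT;	/* event Tstorm wakes up on */
	reg |= cm_hdr << EX_CM_HDR_SHIFT;	/* CM header passed along with it */
	return reg;
}

int main(void)
{
	/* 23/272 mirror the GFT event ID and action CM header defined above */
	printf("0x%08x\n", pack_gft_cm_hdr(0, 23, 272));
	return 0;
}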
+void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6)
+{
+ u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ union gft_cam_line_union camLine;
+ struct gft_ram_line ramLine;
+ u32 *ramLinePointer = (u32 *)&ramLine;
+ int i;
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn, true,
+ "set_rfs_mode_enable: must accept at "
+ "least on of - ipv4 or ipv6");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn, true,
+ "set_rfs_mode_enable: must accept at "
+ "least on of - udp or tcp");
+	/* set RFS event ID to be awakened in Tstorm by PRS */
+ rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
+ PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+ /* Configure Registers for RFS mode */
+/* enable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
+							     * context, only cid,
+ * in PRS on match
+ */
+ camLine.cam_line_mapped.camline = 0;
+ /* cam line is now valid!! */
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_VALID, 1);
+ /* filters are per PF!! */
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ if (!(tcp && udp)) {
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ if (tcp)
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(camLine.cam_line_mapped.camline,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+ /* write characteristics to cam */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ camLine.cam_line_mapped.camline);
+ camLine.cam_line_mapped.camline =
+ ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+ /* write line to RAM - compare to filter 4 tuple */
+ ramLine.low32bits = 0;
+ ramLine.high32bits = 0;
+ SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
+ /* each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(ramLinePointer + i));
+ /* set default profile so that no filter match will happen */
+ ramLine.low32bits = 0xffff;
+ ramLine.high32bits = 0xffff;
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
+ i * REG_SIZE, *(ramLinePointer + i));
+}
+
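The PRS_REG_GFT_PROFILE_MASK_RAM loops above slice a 64-bit RAM line into REG_SIZE (32-bit) register writes. A standalone illustration of that slicing, with a plain array standing in for the ecore_wr() targets:

#include <stdint.h>
#include <stdio.h>

#define EX_RAM_LINE_SIZE sizeof(uint64_t)
#define EX_REG_SIZE      sizeof(uint32_t)

/* Toy register file standing in for the ecore_wr() destinations */
static uint32_t fake_regs[EX_RAM_LINE_SIZE / EX_REG_SIZE];

struct example_ram_line {
	uint32_t low32bits;
	uint32_t high32bits;
};

int main(void)
{
	struct example_ram_line line = { .low32bits = 0x3, .high32bits = 0xc };
	uint32_t *p = (uint32_t *)&line;
	unsigned int i;

	/* each iteration writes one 32-bit chunk of the 64-bit line */
	for (i = 0; i < EX_RAM_LINE_SIZE / EX_REG_SIZE; i++)
		fake_regs[i] = p[i];

	for (i = 0; i < EX_RAM_LINE_SIZE / EX_REG_SIZE; i++)
		printf("reg[%u] = 0x%08x\n", i, fake_regs[i]);
	return 0;
}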
+/* Configure VF zone size mode*/
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u16 mode,
+ bool runtime_init)
+{
+ u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
+ u32 msdm_vf_offset_mask;
+ if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ msdm_vf_size_log += 1;
+ else if (mode == VF_ZONE_SIZE_MODE_QUAD)
+ msdm_vf_size_log += 2;
+ msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
+ if (runtime_init) {
+ STORE_RT_REG(p_hwfn,
+ PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
+ msdm_vf_size_log);
+ STORE_RT_REG(p_hwfn,
+ PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
+ msdm_vf_offset_mask);
+ } else {
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
+ ecore_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
+ }
+}
+
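The function above boils down to choosing a size log (+1 for double mode, +2 for quad) and deriving the per-VF offset mask from it. A small sketch of that arithmetic, with MSTORM_VF_ZONE_DEFAULT_SIZE_LOG replaced by an assumed placeholder value:

#include <stdint.h>
#include <stdio.h>

/* Placeholder; the real value comes from the generated HSI headers */
#define EX_VF_ZONE_DEFAULT_SIZE_LOG 11

enum example_vf_zone_size_mode {
	EX_ZONE_DEFAULT,
	EX_ZONE_DOUBLE,
	EX_ZONE_QUAD,
};

static void zone_params(enum example_vf_zone_size_mode mode,
			uint32_t *size_log, uint32_t *offset_mask)
{
	uint32_t log = EX_VF_ZONE_DEFAULT_SIZE_LOG;

	if (mode == EX_ZONE_DOUBLE)
		log += 1;	/* zone doubles -> one more address bit */
	else if (mode == EX_ZONE_QUAD)
		log += 2;	/* zone quadruples -> two more address bits */

	*size_log = log;
	*offset_mask = (1u << log) - 1;	/* mask covering a single zone */
}

int main(void)
{
	uint32_t log, mask;

	zone_params(EX_ZONE_QUAD, &log, &mask);
	printf("size_log=%u offset_mask=0x%x\n", log, mask);
	return 0;
}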
+/* get mstorm statistics offset by VF zone size mode */
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
+ u16 stat_cnt_id,
+ u16 vf_zone_size_mode)
+{
+ u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
+ if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
+ (stat_cnt_id > MAX_NUM_PFS)) {
+ if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ (stat_cnt_id - MAX_NUM_PFS);
+ else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
+ offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ (stat_cnt_id - MAX_NUM_PFS);
+ }
+ return offset;
+}
+
+/* get mstorm VF producer offset by VF zone size mode*/
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
+ u8 vf_id,
+ u8 vf_queue_id,
+ u16 vf_zone_size_mode)
+{
+ u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+ if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
+ if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
+ offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ vf_id;
+ else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
+ offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
+ vf_id;
+ }
+ return offset;
+}
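Both offset helpers apply the same correction: entries beyond the PF range grow by one extra default-sized zone per index in double mode and by three in quad mode. A worked, self-contained version of the statistics variant, with the HSI base offset and zone-size log replaced by assumed constants:

#include <stdint.h>
#include <stdio.h>

/* Assumed placeholders for the generated HSI values */
#define EX_QUEUE_STAT_BASE(id)	(0x1000 + 0x40 * (id))
#define EX_VF_ZONE_DEFAULT_LOG	11
#define EX_MAX_NUM_PFS		16

enum { EX_ZONE_DEFAULT, EX_ZONE_DOUBLE, EX_ZONE_QUAD };

static uint32_t queue_stat_offset(uint16_t stat_cnt_id, uint16_t mode)
{
	uint32_t offset = EX_QUEUE_STAT_BASE(stat_cnt_id);

	if (mode != EX_ZONE_DEFAULT && stat_cnt_id > EX_MAX_NUM_PFS) {
		uint32_t zone = 1u << EX_VF_ZONE_DEFAULT_LOG;
		uint32_t vf_idx = stat_cnt_id - EX_MAX_NUM_PFS;

		if (mode == EX_ZONE_DOUBLE)
			offset += zone * vf_idx;	/* each zone grew by 1x */
		else if (mode == EX_ZONE_QUAD)
			offset += 3 * zone * vf_idx;	/* each zone grew by 3x */
	}
	return offset;
}

int main(void)
{
	printf("0x%x\n", queue_stat_offset(20, EX_ZONE_QUAD));
	return 0;
}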
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 5280cd7a..9df0e7de 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -26,12 +26,14 @@ struct init_qm_pq_params;
* @return The required host memory size in 4KB units.
*/
u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs);
/**
- * @brief ecore_qm_common_rt_init -
- * Prepare QM runtime init values for the engine phase
+ * @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
+ * phase
*
* @param p_hwfn
* @param max_ports_per_engine - max number of ports per engine in HW
@@ -40,38 +42,64 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
* @param pf_wfq_en - enable per-PF WFQ
* @param vport_rl_en - enable per-VPORT rate limiters
* @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params- array of size MAX_NUM_PORTS with parameters for each port
+ * @param port_params - array of size MAX_NUM_PORTS with params for each port
*
* @return 0 on success, -1 on error.
*/
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- bool pf_rl_en,
- bool pf_wfq_en,
- bool vport_rl_en,
- bool vport_wfq_en,
- struct init_qm_port_params
- port_params[MAX_NUM_PORTS]);
-
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ bool pf_rl_en,
+ bool pf_wfq_en,
+ bool vport_rl_en,
+ bool vport_wfq_en,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+/**
+ * @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param port_id - port ID
+ * @param pf_id - PF ID
+ * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
+ * @param is_first_pf - 1 = first PF in engine, 0 = otherwise
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
+ * @param num_vports - number of VPORTs associated with this PF
+ * @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
+ * be 0. otherwise, the weight must be non-zero.
+ * @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
+ * configure. ignored if PF RL is globally disabled.
+ * @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
+ * each Tx PQ associated with the specified PF.
+ * @param vport_params - array of size num_vports with parameters for each
+ * associated VPORT.
+ *
+ * @return 0 on success, -1 on error.
+ */
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 port_id,
- u8 pf_id,
- u8 max_phys_tcs_per_port,
- bool is_first_pf,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 start_pq,
- u16 num_pf_pqs,
- u16 num_vf_pqs,
- u8 start_vport,
- u8 num_vports,
- u16 pf_wfq,
- u32 pf_rl,
- struct init_qm_pq_params *pq_params,
- struct init_qm_vport_params *vport_params);
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ bool is_first_pf,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
*
@@ -83,7 +111,9 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u16 pf_wfq);
/**
* @brief ecore_init_pf_rl Initializes the rate limit of the specified PF
*
@@ -95,9 +125,11 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+ struct ecore_ptt *p_ptt,
+ u8 pf_id,
+ u32 pf_rl);
/**
- * @brief ecore_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ * @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
@@ -109,8 +141,9 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
+ struct ecore_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 vport_wfq);
/**
* @brief ecore_init_vport_rl Initializes the rate limit of the specified VPORT
*
@@ -122,7 +155,9 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl);
+ struct ecore_ptt *p_ptt,
+ u8 vport_id,
+ u32 vport_rl);
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
*
@@ -133,13 +168,16 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
* @param start_pq - first PQ ID to stop
* @param num_pqs - Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while
- * waiting for QM command done.
+ * @return bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
*/
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq, u16 start_pq, u16 num_pqs);
+ struct ecore_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq,
+ u16 start_pq,
+ u16 num_pqs);
+#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
*
@@ -152,8 +190,9 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
* requirements are ignored when is_lb is cleared.
*/
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_ets_req *req, bool is_lb);
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req,
+ bool is_lb);
/**
* @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
*
@@ -163,8 +202,9 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
* @param req - the NIG LB RLs initialization requirements.
*/
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_nig_lb_rl_req *req);
+ struct ecore_ptt *p_ptt,
+ struct init_nig_lb_rl_req *req);
+#endif /* UNUSED_HSI_FUNC */
/**
* @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
*
@@ -174,8 +214,9 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
 * @param req - required mapping from priorities to TCs.
*/
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_nig_pri_tc_map_req *req);
+ struct ecore_ptt *p_ptt,
+ struct init_nig_pri_tc_map_req *req);
+#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
*
@@ -185,7 +226,10 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
* @param req - the PRS ETS initialization requirements.
*/
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct init_ets_req *req);
+ struct ecore_ptt *p_ptt,
+ struct init_ets_req *req);
+#endif /* UNUSED_HSI_FUNC */
+#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
*
@@ -195,43 +239,52 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
* @param req - the BRB RAM initialization requirements.
*/
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, struct init_brb_ram_req *req);
+ struct ecore_ptt *p_ptt,
+ struct init_brb_ram_req *req);
+#endif /* UNUSED_HSI_FUNC */
+#ifndef UNUSED_HSI_FUNC
/**
- * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf
- * and llh ethType Regs to input ethType
- * should Be called once per engine if engine is in BD mode.
+ * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
+ * ethType Regs to input ethType
+ *                                             should be called once per engine
+ *                                             if engine is in BD mode.
*
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 eth_type);
+ struct ecore_ptt *p_ptt, u32 ethType);
/**
- * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs
- * to input ethType
- * should Be called once per port.
+ * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
+ *                                           input ethType. Should be called
+ *                                           once per port.
*
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 eth_type);
+ struct ecore_ptt *p_ptt, u32 ethType);
+#endif /* UNUSED_HSI_FUNC */
/**
- * @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port
+ * @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
+ * port
*
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - vxlan destination udp port.
*/
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 dest_port);
+ struct ecore_ptt *p_ptt,
+ u16 dest_port);
/**
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - vxlan enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool vxlan_enable);
+ struct ecore_ptt *p_ptt,
+ bool vxlan_enable);
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -241,15 +294,18 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- bool eth_gre_enable, bool ip_gre_enable);
+ bool eth_gre_enable,
+ bool ip_gre_enable);
/**
- * @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port
+ * @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
+ * udp port
*
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - geneve destination udp port.
*/
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 dest_port);
+ struct ecore_ptt *p_ptt,
+ u16 dest_port);
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -259,5 +315,65 @@ void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
*/
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- bool eth_geneve_enable, bool ip_geneve_enable);
+ bool eth_geneve_enable,
+ bool ip_geneve_enable);
+#ifndef UNUSED_HSI_FUNC
+/**
+* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
+*
+* @param p_ptt - ptt window used for writing the registers.
+*/
+void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
+*
+*
+* @param p_ptt - ptt window used for writing the registers.
+* @param pf_id - pf on which to enable RFS.
+* @param tcp - set profile tcp packets.
+* @param udp - set profile udp packet.
+* @param ipv4 - set profile ipv4 packet.
+* @param ipv6 - set profile ipv6 packet.
+*/
+void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4,
+ bool ipv6);
+#endif /* UNUSED_HSI_FUNC */
+/**
+* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
+* used before the first ETH queue is started.
+*
+*
+* @param p_ptt - ptt window used for writing the registers. Don't care
+* if runtime_init used
+* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
+* @param runtime_init - Set 1 to init runtime registers in engine phase. Set 0
+* if VF zone size mode configured after engine phase.
+*/
+void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
+				    struct ecore_ptt *p_ptt, u16 mode,
+				    bool runtime_init);
+/**
+* @brief ecore_get_mstorm_queue_stat_offset - get mstorm statistics offset by VF
+* zone size mode.
+*
+* @param stat_cnt_id - statistic counter id
+* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
+*/
+u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
+ u16 stat_cnt_id, u16 vf_zone_size_mode);
+/**
+* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
+* size mode.
+*
+* @param vf_id - vf id.
+* @param vf_queue_id - per VF rx queue id.
+* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
+*/
+u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id,
+					 u8 vf_queue_id, u16 vf_zone_size_mode);
#endif
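Taken together, the new prototypes suggest a simple enabling sequence for RFS/GFT steering. A hedged usage sketch, assuming p_hwfn and p_ptt were obtained through the normal ecore bring-up and that the base headers resolve the types:

#include "ecore.h"
#include "ecore_init_fw_funcs.h"

/* Illustrative only: steer TCP and UDP over IPv4; leave IPv6 profiles off. */
static void example_enable_rfs(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt, u16 pf_id)
{
	ecore_set_rfs_mode_enable(p_hwfn, p_ptt, pf_id,
				  true,	  /* tcp  */
				  true,	  /* udp  */
				  true,	  /* ipv4 */
				  false); /* ipv6 */
}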
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 326eb926..351e9467 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -251,9 +251,9 @@ static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
b_can_dmae);
if (rc)
break;
- }
- break;
}
+ break;
+ }
case INIT_ARR_STANDARD:
size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
@@ -525,7 +525,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
* not too bright, but it should work on the simple FPGA/EMUL
* scenarios.
*/
- bool initialized = false; /* @DPDK */
+ static bool initialized;
int poll_cnt = 500;
u32 val;
@@ -573,9 +573,10 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
return ECORE_INVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+ /* First Dword contains metadata and should be skipped */
+ buf_hdr = (struct bin_buffer_hdr *)((uintptr_t)(data + sizeof(u32)));
- offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index 8a6fce41..d58c7d6a 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -32,7 +32,9 @@ void ecore_init_iro_array(struct ecore_dev *p_dev);
*/
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- int phase, int phase_id, int modes);
+ int phase,
+ int phase_id,
+ int modes);
/**
* @brief ecore_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
@@ -52,6 +54,7 @@ enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_init_free(struct ecore_hwfn *p_hwfn);
+
/**
* @brief ecore_init_clear_rt_data - Clears the runtime init array.
*
@@ -68,7 +71,9 @@ void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
* @param rt_offset
* @param val
*/
-void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
+void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
#define STORE_RT_REG(hwfn, offset, val) \
ecore_init_store_rt_reg(hwfn, offset, val)
@@ -87,11 +92,14 @@ void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val);
*/
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
- u32 rt_offset, u32 *val, osal_size_t size);
+ u32 rt_offset,
+ u32 *val,
+ osal_size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+
/**
* @brief
* Initialize GTT global windows and set admin window
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index ea0fd7aa..6fb037df 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -21,7 +21,6 @@
#include "ecore_hw_defs.h"
#include "ecore_hsi_common.h"
#include "ecore_mcp.h"
-#include "ecore_attn_values.h"
struct ecore_pi_info {
ecore_int_comp_cb_t comp_cb;
@@ -61,8 +60,6 @@ struct aeu_invert_reg_bit {
#define ATTENTION_OFFSET_SHIFT (12)
#define ATTENTION_CLEAR_ENABLE (1 << 28)
-#define ATTENTION_FW_DUMP (1 << 29)
-#define ATTENTION_PANIC_DUMP (1 << 30)
unsigned int flags;
/* Callback to call if attention will be triggered */
@@ -100,7 +97,7 @@ static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0)
#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
-#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
@@ -284,16 +281,7 @@ out:
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
- u32 tmp, reg_addr;
-
- reg_addr =
- attn_blocks[BLOCK_PGLUE_B].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
- int_regs[0]->mask_addr;
-
- /* Mask unnecessary attentions -@TBD move to MFW */
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
- tmp |= (1 << 19); /* Was PGL_PCIE_ATTN */
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
+ u32 tmp;
tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS2);
@@ -407,32 +395,6 @@ static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_nig_attn_cb(struct ecore_hwfn *p_hwfn)
-{
- u32 tmp, reg_addr;
-
- /* Mask unnecessary attentions -@TBD move to MFW */
- reg_addr =
- attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
- int_regs[3]->mask_addr;
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
- tmp |= (1 << 0); /* Was 3_P0_TX_PAUSE_TOO_LONG_INT */
- tmp |= NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT;
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
-
- reg_addr =
- attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
- int_regs[5]->mask_addr;
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
- tmp |= (1 << 0); /* Was 5_P1_TX_PAUSE_TOO_LONG_INT */
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
-
- /* TODO - a bit risky to return success here; But alternative is to
- * actually read the multitdue of interrupt register of the block.
- */
- return ECORE_SUCCESS;
-}
-
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
DP_NOTICE(p_hwfn, false, "FW assertion!\n");
@@ -559,7 +521,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
{"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
MAX_BLOCK_ID},
- {"NIG", ATTENTION_PAR_INT, ecore_nig_attn_cb, BLOCK_NIG},
+ {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
{"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
{"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
{"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
@@ -761,46 +723,12 @@ static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
- struct attn_hw_reg *p_reg_desc,
- struct attn_hw_block *p_block,
- enum ecore_attention_type type,
- u32 val, u32 mask)
+static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
+ enum block_id id, enum dbg_attn_type type,
+ bool b_clear)
{
- int j;
-#ifdef ATTN_DESC
- const char **description;
-
- if (type == ECORE_ATTN_TYPE_ATTN)
- description = p_block->int_desc;
- else
- description = p_block->prty_desc;
-#endif
-
- for (j = 0; j < p_reg_desc->num_of_bits; j++) {
- if (val & (1 << j)) {
-#ifdef ATTN_DESC
- DP_NOTICE(p_hwfn, false,
- "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n",
- p_block->name,
- type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
- "Parity",
- description[p_reg_desc->bit_attn_idx[j]],
- p_reg_desc->reg_idx,
- p_reg_desc->sts_addr, j,
- (mask & (1 << j)) ? " [MASKED]" : "");
-#else
- DP_NOTICE(p_hwfn->p_dev, false,
- "%s (%s): [reg %d [0x%08x], bit %d]%s\n",
- p_block->name,
- type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
- "Parity",
- p_reg_desc->reg_idx,
- p_reg_desc->sts_addr, j,
- (mask & (1 << j)) ? " [MASKED]" : "");
-#endif
- }
- }
+ /* @DPDK */
+ DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
}
/**
@@ -818,120 +746,54 @@ static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t
ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
- u32 aeu_en_reg, u32 bitmask)
+ u32 aeu_en_reg,
+ const char *p_bit_name,
+ u32 bitmask)
{
enum _ecore_status_t rc = ECORE_INVAL;
- u32 val, mask;
-
-#ifndef REMOVE_DBG
- u32 interrupts[20]; /* TODO- change into HSI define once supplied */
-
- OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20); /* FIXME real size) */
-#endif
+ bool b_fatal = false;
DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
- p_aeu->bit_name, bitmask);
+ p_bit_name, bitmask);
/* Call callback before clearing the interrupt status */
if (p_aeu->cb) {
DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
- p_aeu->bit_name);
+ p_bit_name);
rc = p_aeu->cb(p_hwfn);
}
- /* Handle HW block interrupt registers */
- if (p_aeu->block_index != MAX_BLOCK_ID) {
- u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
- struct attn_hw_block *p_block;
- int i;
-
- p_block = &attn_blocks[p_aeu->block_index];
-
- /* Handle each interrupt register */
- for (i = 0;
- i < p_block->chip_regs[chip_type].num_of_int_regs; i++) {
- struct attn_hw_reg *p_reg_desc;
- u32 sts_addr;
-
- p_reg_desc = p_block->chip_regs[chip_type].int_regs[i];
+ if (rc != ECORE_SUCCESS)
+ b_fatal = true;
- /* In case of fatal attention, don't clear the status
- * so it would appear in idle check.
- */
- if (rc == ECORE_SUCCESS)
- sts_addr = p_reg_desc->sts_clr_addr;
- else
- sts_addr = p_reg_desc->sts_addr;
-
- val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
- mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->mask_addr);
- ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
- p_block,
- ECORE_ATTN_TYPE_ATTN,
- val, mask);
-
-#ifndef REMOVE_DBG
- interrupts[i] = val;
-#endif
- }
- }
+ /* Print HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID) {
+ ecore_int_attn_print(p_hwfn, p_aeu->block_index,
+ ATTN_TYPE_INTERRUPT, !b_fatal);
+}
/* Reach assertion if attention is fatal */
- if (rc != ECORE_SUCCESS) {
+ if (b_fatal) {
DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
- p_aeu->bit_name);
+ p_bit_name);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
/* Prevent this Attention from being asserted in the future */
- if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) {
+ if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
+ p_hwfn->p_dev->attn_clr_en) {
u32 val;
u32 mask = ~bitmask;
val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
- p_aeu->bit_name);
- }
-
- if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) {
- /* @@@TODO - what to dump? <yuvalmin 04/02/13> */
- DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n",
- p_aeu->bit_name);
- return ECORE_NOTIMPL;
+ p_bit_name);
}
return rc;
}
-static void ecore_int_parity_print(struct ecore_hwfn *p_hwfn,
- struct aeu_invert_reg_bit *p_aeu,
- struct attn_hw_block *p_block, u8 bit_index)
-{
- u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
- int i;
-
- for (i = 0; i < p_block->chip_regs[chip_type].num_of_prty_regs; i++) {
- struct attn_hw_reg *p_reg_desc;
- u32 val, mask;
-
- p_reg_desc = p_block->chip_regs[chip_type].prty_regs[i];
-
- val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->sts_clr_addr);
- mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- p_reg_desc->mask_addr);
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "%s[%d] - parity register[%d] is %08x [mask is %08x]\n",
- p_aeu->bit_name, bit_index, i, val, mask);
- ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
- p_block,
- ECORE_ATTN_TYPE_PARITY,
- val, mask);
- }
-}
-
/**
* @brief ecore_int_deassertion_parity - handle a single parity AEU source
*
@@ -949,19 +811,18 @@ static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
p_aeu->bit_name, bit_index);
- if (block_id != MAX_BLOCK_ID) {
- ecore_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
- bit_index);
-
- /* In A0, there's a single parity bit for several blocks */
- if (block_id == BLOCK_BTB) {
- ecore_int_parity_print(p_hwfn, p_aeu,
- &attn_blocks[BLOCK_OPTE],
- bit_index);
- ecore_int_parity_print(p_hwfn, p_aeu,
- &attn_blocks[BLOCK_MCP],
- bit_index);
- }
+ if (block_id == MAX_BLOCK_ID)
+ return;
+
+ ecore_int_attn_print(p_hwfn, block_id,
+ ATTN_TYPE_PARITY, false);
+
+ /* In A0, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ ecore_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
}
}
@@ -1041,8 +902,8 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
* previous assertion.
*/
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ unsigned long int bitmask;
u8 bit, bit_len;
- u32 bitmask;
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
@@ -1060,10 +921,31 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
bitmask = bits & (((1 << bit_len) - 1) << bit);
if (bitmask) {
+ u32 flags = p_aeu->flags;
+ char bit_name[30];
+
+ bit = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
+ bit_len);
+
+				/* Some bits represent more than a
+				 * single interrupt. Correctly print
+ * their name.
+ */
+ if (ATTENTION_LENGTH(flags) > 2 ||
+ ((flags & ATTENTION_PAR_INT) &&
+ ATTENTION_LENGTH(flags) > 1))
+ OSAL_SNPRINTF(bit_name, 30,
+ p_aeu->bit_name,
+ bit);
+ else
+ OSAL_STRNCPY(bit_name,
+ p_aeu->bit_name,
+ 30);
/* Handle source of the attention */
ecore_int_deassertion_aeu_bit(p_hwfn,
p_aeu,
aeu_en,
+ bit_name,
bitmask);
}
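The OSAL_SNPRINTF/OSAL_STRNCPY split above exists because some AEU descriptors cover several bits and carry a printf-style name such as "Reserved %d"; the index of the asserted bit is formatted into the name before printing. A tiny standalone illustration of that naming rule:

#include <stdio.h>
#include <string.h>

static void format_bit_name(char *out, size_t len, const char *tmpl,
			    unsigned int bit, int is_multi_bit)
{
	if (is_multi_bit) {
		snprintf(out, len, tmpl, bit); /* "Reserved %d" -> "Reserved 3" */
	} else {
		strncpy(out, tmpl, len - 1);   /* single-bit names used verbatim */
		out[len - 1] = '\0';
	}
}

int main(void)
{
	char name[30];

	format_bit_name(name, sizeof(name), "Reserved %d", 3, 1);
	printf("%s\n", name);
	format_bit_name(name, sizeof(name), "NIG", 0, 0);
	printf("%s\n", name);
	return 0;
}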
@@ -1097,8 +979,8 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
u16 index = 0, asserted_bits, deasserted_bits;
- enum _ecore_status_t rc = ECORE_SUCCESS;
u32 attn_bits = 0, attn_acks = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* Read current attention bits/acks - safeguard against attentions
	 * by guaranteeing work on a synchronized timeframe
@@ -1170,13 +1052,11 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
struct ecore_pi_info *pi_info = OSAL_NULL;
struct ecore_sb_attn_info *sb_attn;
struct ecore_sb_info *sb_info;
- static int arr_size;
+ int arr_size;
u16 rc = 0;
- if (!p_hwfn) {
- DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
+ if (!p_hwfn)
return;
- }
if (!p_hwfn->p_sp_sb) {
DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
@@ -1237,7 +1117,8 @@ void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
return;
}
- /* Check the validity of the DPC ptt. If not ack interrupts and fail */
+/* Check the validity of the DPC ptt. If not, ack interrupts and fail */
+
if (!p_hwfn->p_dpc_ptt) {
DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
@@ -1350,7 +1231,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
void *p_virt;
/* SB struct */
- p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(struct ecore_sb_attn_info));
+ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_dev, true,
"Failed to allocate `struct ecore_sb_attn_info'");
@@ -1375,17 +1256,8 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
}
/* coalescing timeout = timeset << (timer_res + 1) */
-#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
-#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
-#else
#define ECORE_CAU_DEF_RX_USECS 24
-#endif
-
-#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
-#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
-#else
#define ECORE_CAU_DEF_TX_USECS 48
-#endif
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
@@ -1393,6 +1265,7 @@ void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 cau_state;
+ u8 timer_res;
OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -1402,28 +1275,33 @@ void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
- /* setting the time resultion to a fixed value ( = 1) */
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
- ECORE_CAU_DEF_RX_TIMER_RES);
- SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
- ECORE_CAU_DEF_TX_TIMER_RES);
-
cau_state = CAU_HC_DISABLE_STATE;
if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
cau_state = CAU_HC_ENABLE_STATE;
- if (!p_dev->rx_coalesce_usecs) {
+ if (!p_dev->rx_coalesce_usecs)
p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
- DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
- p_dev->rx_coalesce_usecs);
- }
- if (!p_dev->tx_coalesce_usecs) {
+ if (!p_dev->tx_coalesce_usecs)
p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
- DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
- p_dev->tx_coalesce_usecs);
- }
}
+ /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
+ if (p_dev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_dev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (p_dev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_dev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
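The threshold checks above pick the smallest timer resolution that keeps the 7-bit timeset in range (usec values up to 0x7F use resolution 0, up to 0xFF resolution 1, anything larger resolution 2); the PI-configuration hunk below then derives timeset as usecs >> timer_res. A small sketch of that mapping:

#include <stdint.h>
#include <stdio.h>

/* Pick the smallest timer resolution keeping (usecs >> timer_res) <= 0x7F. */
static void coalesce_to_cau(uint32_t usecs, uint8_t *timer_res, uint8_t *timeset)
{
	if (usecs <= 0x7F)
		*timer_res = 0;
	else if (usecs <= 0xFF)
		*timer_res = 1;
	else
		*timer_res = 2;

	*timeset = (uint8_t)(usecs >> *timer_res);
}

int main(void)
{
	uint8_t res, set;

	coalesce_to_cau(48, &res, &set);	/* default Tx: res 0, timeset 48 */
	printf("48us  -> res=%u timeset=%u\n", res, set);
	coalesce_to_cau(200, &res, &set);	/* 200us: res 1, timeset 100 */
	printf("200us -> res=%u timeset=%u\n", res, set);
	return 0;
}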
@@ -1463,17 +1341,32 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
- u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */
- u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
- (ECORE_CAU_DEF_RX_TIMER_RES + 1);
+ /* eth will open queues for all tcs, so configure all of them
+ * properly, rather than just the active ones
+ */
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
+
+ u8 timeset, timer_res;
u8 i;
+ /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
+ if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
ECORE_COAL_RX_STATE_MACHINE, timeset);
- timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
- (ECORE_CAU_DEF_TX_TIMER_RES + 1);
-
+ if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
for (i = 0; i < num_tc; i++) {
ecore_int_cau_conf_pi(p_hwfn, p_ptt,
igu_sb_id, TX_PI(i),
@@ -1647,10 +1540,10 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
/* SB struct */
p_sb =
OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_sb_sp_info));
+ sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sb_info'");
+ "Failed to allocate `struct ecore_sb_info'\n");
return ECORE_NOMEM;
}
@@ -1658,7 +1551,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_phys, SB_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
+ DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
OSAL_FREE(p_hwfn->p_dev, p_sb);
return ECORE_NOMEM;
}
@@ -1719,14 +1612,14 @@ void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode)
{
- u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
- else
+ igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
+ }
#endif
- igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;
p_hwfn->p_dev->int_mode = int_mode;
switch (p_hwfn->p_dev->int_mode) {
@@ -1767,6 +1660,7 @@ static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+ /* Flush the writes to IGU */
OSAL_MMIOWB(p_hwfn->p_dev);
/* Unmask AEU signals toward IGU */
@@ -1775,16 +1669,10 @@ static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- enum ecore_int_mode int_mode)
+ enum ecore_int_mode int_mode)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 tmp, reg_addr;
-
- /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
- tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
- tmp |= 0xf;
- ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
+ u32 tmp;
/* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
* attentions. Since we're waiting for BRCM answer regarding this
@@ -1794,16 +1682,6 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
tmp &= ~0x800;
ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
- /* @@@tmp - Mask interrupt sources - should move to init tool;
- * Also, correct for A0 [might still change in B0.
- */
- reg_addr =
- attn_blocks[BLOCK_BRB].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
- int_regs[0]->mask_addr;
- tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
- tmp |= (1 << 21); /* Was PKT4_LEN_ERROR */
- ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
-
ecore_int_igu_enable_attn(p_hwfn, p_ptt);
if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
@@ -1836,7 +1714,7 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
}
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
-void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
+static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
@@ -1868,6 +1746,7 @@ void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+ /* Flush the write to IGU */
OSAL_MMIOWB(p_hwfn->p_dev);
/* calculate where to read the status bit from */
@@ -1894,7 +1773,7 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 sb_id, u16 opaque, bool b_set)
{
- int pi;
+ int pi, i;
/* Set */
if (b_set)
@@ -1903,6 +1782,23 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
/* Clear */
ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING +
+ ((sb_id / 32) * 4));
+ if (val & (1 << (sb_id % 32)))
+ OSAL_UDELAY(10);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn, true,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ sb_id);
+
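The added wait loop polls one bit in an array of 32-bit WRITE_DONE_PENDING registers: word sb_id / 32, bit sb_id % 32, with a bounded number of 10us delays. The same pattern in isolation, against a plain memory array standing in for the register reads and delays:

#include <stdint.h>
#include <stdio.h>

#define EX_SLEEP_LENGTH 1000

/* Pretend register file: bit N set means SB N still has a pending write */
static uint32_t pending[8];

static int wait_sb_done(uint32_t sb_id)
{
	int i;

	for (i = 0; i < EX_SLEEP_LENGTH; i++) {
		uint32_t val = pending[sb_id / 32];

		if (!(val & (1u << (sb_id % 32))))
			return 0;	/* bit cleared -> cleanup done */
		/* real code: OSAL_UDELAY(10); here we simply retry */
	}
	return -1;	/* still pending after the full wait */
}

int main(void)
{
	printf("sb 40: %d\n", wait_sb_done(40));	/* word 1, bit 8 clear */
	pending[1] = 1u << 8;
	printf("sb 40: %d\n", wait_sb_done(40));	/* now stuck pending */
	return 0;
}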
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
ecore_wr(p_hwfn, p_ptt,
@@ -1978,8 +1874,8 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
{
struct ecore_igu_info *p_igu_info;
struct ecore_igu_block *p_block;
+ u32 min_vf = 0, max_vf = 0, val;
u16 sb_id, last_iov_sb_id = 0;
- u32 min_vf, max_vf, val;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
@@ -1998,16 +1894,14 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
-#ifdef CONFIG_ECORE_SRIOV
- min_vf = p_hwfn->hw_info.first_vf_in_pf;
- max_vf = p_hwfn->hw_info.first_vf_in_pf +
- p_hwfn->p_dev->sriov_info.total_vfs;
-#else
- min_vf = 0;
- max_vf = 0;
-#endif
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
- for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+ for (sb_id = 0;
+ sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
sb_id++) {
p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
@@ -2209,11 +2103,11 @@ u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ } else {
+ DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
+ sb_id);
+ return 0;
}
-
- DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
- sb_id);
- return 0;
}
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
@@ -2223,3 +2117,45 @@ void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
for_each_hwfn(p_dev, i)
p_dev->hwfns[i].b_int_requested = false;
}
+
+void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
+{
+ p_dev->attn_clr_en = clr_enable;
+}
+
+enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ enum _ecore_status_t rc;
+ struct cau_sb_entry sb_entry;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ if (tx)
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
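The new helper reads the CAU SB entry back over DMAE, patches only the Rx or Tx timer-resolution field, and writes it back, so it can be used at runtime once hw_init_done is set. A hedged usage sketch, assuming the handles come from the normal bring-up path:

#include "ecore.h"
#include "ecore_int.h"

/* Illustrative only: raise the Rx coalescing resolution of one status block. */
static enum _ecore_status_t example_set_rx_timer_res(struct ecore_hwfn *p_hwfn,
						     struct ecore_ptt *p_ptt,
						     u16 sb_id)
{
	u8 timer_res = 1; /* e.g. chosen for coalesce values above 0x7F usecs */

	/* tx = false -> CAU_SB_ENTRY_TIMER_RES0 (the Rx field) is updated */
	return ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
}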
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 17c95213..45358b94 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -122,22 +122,6 @@ u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
- * @param cleanup_set - set(1) / clear(0)
- * @param opaque_fid - the function for which to perform
- * cleanup, for example a PF on behalf of
- * its VFs.
- */
-void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 sb_id, bool cleanup_set, u16 opaque_fid);
-
-/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
* @param cleanup_set - set(1) / clear(0)
*/
@@ -169,8 +153,8 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_int_free
@@ -223,6 +207,9 @@ void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry, u8 pf_id,
u16 vf_number, u8 vf_valid);
+enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
#ifndef ASIC_ONLY
#define ECORE_MAPPING_MEMORY_SIZE(dev) \
((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index f6db807e..fc873e77 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -274,4 +274,15 @@ void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
*/
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+/**
+ * @brief ecore_int_attn_clr_enable - sets whether the general behavior is
+ * preventing attentions from being reasserted, or following the
+ * attributes of the specific attention.
+ *
+ * @param p_dev
+ * @param clr_enable
+ *
+ */
+void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
+
#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index b34a9c6b..bb8df82f 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -9,34 +9,49 @@
#ifndef __ECORE_SRIOV_API_H__
#define __ECORE_SRIOV_API_H__
+#include "common_hsi.h"
#include "ecore_status.h"
+#define ECORE_ETH_VF_NUM_MAC_FILTERS 1
+#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
#define ECORE_VF_ARRAY_LENGTH (3)
#define IS_VF(p_dev) ((p_dev)->b_is_vf)
#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
#ifdef CONFIG_ECORE_SRIOV
-#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->sriov_info.total_vfs))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
-#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
+#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
/* @@@ TBD MichalK - what should this number be*/
#define ECORE_MAX_VF_CHAINS_PER_PF 16
/* vport update extended feature tlvs flags */
enum ecore_iov_vport_update_flag {
- ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
- ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
- ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
- ECORE_IOV_VP_UPDATE_MCAST = 3,
- ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
- ECORE_IOV_VP_UPDATE_RSS = 5,
- ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
- ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
- ECORE_IOV_VP_UPDATE_MAX = 8,
+ ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
+ ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
+ ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
+ ECORE_IOV_VP_UPDATE_MCAST = 3,
+ ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
+ ECORE_IOV_VP_UPDATE_RSS = 5,
+ ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
+ ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
+ ECORE_IOV_VP_UPDATE_MAX = 8,
+};
+
+/* PF to VF STATUS is part of vfpf-channel API
+ * and must be forward compatible
+*/
+enum ecore_iov_pf_to_vf_status {
+ PFVF_STATUS_WAITING = 0,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
};
struct ecore_mcp_link_params;
@@ -53,6 +68,14 @@ struct ecore_mcp_link_capabilities;
struct ecore_vf_acquire_sw_info {
u32 driver_version;
u8 os_type;
+
+ /* We have several close releases that all use ~same FW with different
+ * versions [making it incompatible as the versioning scheme is still
+	 * tied directly to FW version], allow overriding the check. Only
+ * those versions would actually support this feature [so it would not
+ * break forward compatibility with newer HV drivers that are no longer
+ * suited].
+ */
bool override_fw_version;
};
@@ -67,21 +90,21 @@ struct ecore_public_vf_info {
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
- VF_PF_UNKNOWN_STATE = 0,
- VF_PF_WAIT_FOR_START_REQUEST = 1,
- VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
- VF_PF_REQUEST_IN_PROCESSING = 3,
- VF_PF_RESPONSE_READY = 4,
+ VF_PF_UNKNOWN_STATE = 0,
+ VF_PF_WAIT_FOR_START_REQUEST = 1,
+ VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
+ VF_PF_REQUEST_IN_PROCESSING = 3,
+ VF_PF_RESPONSE_READY = 4,
};
struct ecore_iov_sw_mbx {
- enum mbx_state mbx_state;
+ enum mbx_state mbx_state;
- u32 request_size;
- u32 request_offset;
+ u32 request_size;
+ u32 request_offset;
- u32 response_size;
- u32 response_offset;
+ u32 response_size;
+ u32 response_offset;
};
/**
@@ -92,32 +115,58 @@ struct ecore_iov_sw_mbx {
*
* @return struct ecore_iov_sw_mbx*
*/
-struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id);
+struct ecore_iov_sw_mbx*
+ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
#endif
+/* This struct is part of ecore_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */
+struct ecore_hw_sriov_info {
+ /* standard SRIOV capability fields, mostly for debugging */
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
#ifdef CONFIG_ECORE_SRIOV
+#ifndef LINUX_REMOVE
/**
* @brief mark/clear all VFs before/after an incoming PCIe sriov
* disable.
*
- * @param p_hwfn
+ * @param p_dev
* @param to_disable
*/
-void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable);
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+ u8 to_disable);
/**
- * @brief mark/clear chosen VFs before/after an incoming PCIe
+ * @brief mark/clear chosen VF before/after an incoming PCIe
* sriov disable.
*
- * @param p_hwfn
+ * @param p_dev
+ * @param rel_vf_id
* @param to_disable
*/
-void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id, u8 to_disable);
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
+ u16 rel_vf_id,
+ u8 to_disable);
/**
- *
* @brief ecore_iov_init_hw_for_vf - initialize the HW for
* enabling access of a VF. Also includes preparing the
* IGU for VF access. This needs to be called AFTER hw is
@@ -132,7 +181,8 @@ void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues);
+ u16 rel_vf_id,
+ u16 num_rx_queues);
/**
* @brief ecore_iov_process_mbx_req - process a request received
@@ -143,7 +193,8 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
* @param vfid
*/
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, int vfid);
+ struct ecore_ptt *p_ptt,
+ int vfid);
/**
* @brief ecore_iov_release_hw_for_vf - called once upper layer
@@ -161,7 +212,6 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id);
-#ifndef LINUX_REMOVE
/**
* @brief ecore_iov_set_vf_ctx - set a context for a given VF
*
@@ -172,8 +222,8 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
- u16 vf_id, void *ctx);
-#endif
+ u16 vf_id,
+ void *ctx);
/**
* @brief FLR cleanup for all VFs
@@ -197,7 +247,8 @@ enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 rel_vf_id);
+ struct ecore_ptt *p_ptt,
+ u16 rel_vf_id);
/**
* @brief Update the bulletin with link information. Notice this does NOT
@@ -238,7 +289,8 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
*
* @return bool
*/
-bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
* @brief Check if given VF ID @vfid is valid
@@ -253,7 +305,8 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
* @return bool - true for valid VF ID
*/
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
- int rel_vf_id, bool b_enabled_only);
+ int rel_vf_id,
+ bool b_enabled_only);
/**
* @brief Get VF's public info structure
@@ -264,9 +317,9 @@ bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
*
* @return struct ecore_public_vf_info *
*/
-struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
- *p_hwfn, u16 vfid,
- bool b_enabled_only);
+struct ecore_public_vf_info*
+ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 vfid, bool b_enabled_only);
/**
* @brief Set pending events bitmap for given @vfid
@@ -295,7 +348,8 @@ void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *ptt, int vfid);
+ struct ecore_ptt *ptt,
+ int vfid);
/**
* @brief Set forced MAC address in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
@@ -319,17 +373,6 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
u8 *mac, int vfid);
/**
- * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
- * and configures FW/HW to support the configuration.
- * Setting of pvid 0 would clear the feature.
- * @param p_hwfn
- * @param pvid
- * @param vfid
- */
-void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
- u16 pvid, int vfid);
-
-/**
* @brief Set default behaviour of VF in case no vlans are configured for it
* whether to accept only untagged traffic or all.
* Must be called prior to the VF vport-start.
@@ -342,7 +385,9 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
- bool b_untagged_only, int vfid);
+ bool b_untagged_only,
+ int vfid);
+
/**
* @brief Get VFs opaque fid.
*
@@ -364,6 +409,17 @@ void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
u8 *p_vport_id);
/**
+ * @brief Set forced VLAN [pvid] in PFs copy of bulletin board
+ * and configures FW/HW to support the configuration.
+ * Setting of pvid 0 would clear the feature.
+ * @param p_hwfn
+ * @param pvid
+ * @param vfid
+ */
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid);
+
+/**
* @brief Check if VF has VPORT instance. This can be used
* to check if VPORT is active.
*
@@ -457,9 +513,9 @@ void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
* @param p_reply_virt_size
*/
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id,
+ u16 rel_vf_id,
void **pp_reply_virt_addr,
- u16 *p_reply_virt_size);
+ u16 *p_reply_virt_size);
/**
* @brief Validate if the given length is a valid vfpf message
@@ -486,7 +542,8 @@ u32 ecore_iov_pfvf_msg_length(void);
*
* @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC.
*/
-u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
* @brief Returns pvid if one is configured
@@ -535,7 +592,8 @@ enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
*
* @return num of rxqs chains.
*/
-u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
* @brief - Retrieves num of active rxqs chains
@@ -545,7 +603,8 @@ u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
*
* @return
*/
-u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
* @brief - Retrieves ctx pointer
@@ -555,7 +614,8 @@ u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
*
* @return
*/
-void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
* @brief - Retrieves VF`s num sbs
@@ -565,7 +625,8 @@ void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
*
* @return
*/
-u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
 * @brief - Return true if VF is waiting for acquire
@@ -575,7 +636,8 @@ u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
*
* @return
*/
-bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
 * @brief - Return true if VF is acquired but not initialized
@@ -596,7 +658,8 @@ bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
*
* @return
*/
-bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
* @brief - Get VF's vport min rate configured.
@@ -617,317 +680,23 @@ int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
*/
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate);
-#else
-static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn,
- u8 to_disable)
-{
-}
-
-static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id,
- u8 to_disable)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct
- ecore_hwfn
- * p_hwfn,
- struct
- ecore_ptt
- * p_ptt,
- u16 rel_vf_id,
- u16
- num_rx_queues)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- int vfid)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct
- ecore_hwfn
- * p_hwfn,
- struct
- ecore_ptt
- * p_ptt,
- u16
- rel_vf_id)
-{
- return ECORE_SUCCESS;
-}
-
-#ifndef LINUX_REMOVE
-static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn
- *p_hwfn, u16 vf_id,
- void *ctx)
-{
- return ECORE_INVAL;
-}
-#endif
-static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct
- ecore_hwfn
- * p_hwfn,
- struct
- ecore_ptt
- * p_ptt)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(
- struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities
- *p_caps)
-{
-}
-
-static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities
- *p_caps)
-{
-}
-
-static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id)
-{
- return false;
-}
-
-static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
- int rel_vf_id,
- bool b_enabled_only)
-{
- return false;
-}
-
-static OSAL_INLINE struct ecore_public_vf_info *
- ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid,
- bool b_enabled_only)
-{
- return OSAL_NULL;
-}
-
-static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn
- *p_hwfn, u8 vfid)
-{
-}
-
-static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct
- ecore_hwfn
- * p_hwfn,
- u64 *events)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn
- *p_hwfn,
- struct ecore_ptt
- *ptt, int vfid)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn
- *p_hwfn, u8 *mac,
- int vfid)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct
- ecore_hwfn
- * p_hwfn,
- u8 *mac,
- int vfid)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn
- p_hwfn, u16 pvid,
- int vfid)
-{
-}
-
-static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn,
- int vfid, u16 *opaque_fid)
-{
-}
-
-static OSAL_INLINE void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn,
- int vfid, u8 *p_vport_id)
-{
-}
-
-static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn
- *p_hwfn, int vfid)
-{
- return false;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct
- ecore_hwfn
- * p_hwfn,
- int vfid,
- struct
- ecore_ptt
- * p_ptt)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn,
- int vfid)
-{
- return false;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn
- *p_hwfn,
- int vfid,
- bool val)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn,
- int vfid)
-{
- return false;
-}
-
-static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn,
- int vfid)
-{
- return false;
-}
-
-static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
-{
- return 0;
-}
-
-static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn
- *p_hwfn,
- u16 rel_vf_id,
- void
- **pp_req_virt_addr,
- u16 *
- p_req_virt_size)
-{
-}
-
-static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn
- *p_hwfn,
- u16 rel_vf_id,
- void
- **pp_reply_virt_addr,
- u16 *
- p_reply_virt_size)
-{
-}
-
-static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
-{
- return false;
-}
-
-static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void)
-{
- return 0;
-}
-
-static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn
- *p_hwfn, u16 rel_vf_id)
-{
- return OSAL_NULL;
-}
-
-static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn
- *p_hwfn,
- u16 rel_vf_id)
-{
- return 0;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct
- ecore_hwfn
- * p_hwfn,
- struct
- ecore_ptt
- * p_ptt,
- int vfid,
- int val)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id)
-{
- return 0;
-}
-
-static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn
- *p_hwfn, u16 rel_vf_id)
-{
- return 0;
-}
-
-static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id)
-{
- return OSAL_NULL;
-}
-
-static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id)
-{
- return 0;
-}
-
-static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn
- *p_hwfn, u16 rel_vf_id)
-{
- return false;
-}
-
-static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct
- ecore_hwfn
- * p_hwfn,
- u16 rel_vf_id)
-{
- return false;
-}
-
-static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
- u16 rel_vf_id)
-{
- return false;
-}
-
-static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn,
- int vfid)
-{
- return 0;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(
- struct ecore_dev *p_dev, int vfid, u32 rate)
-{
- return ECORE_INVAL;
-}
#endif
+
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+#endif /* CONFIG_ECORE_SRIOV */
+
+#define ecore_for_each_vf(_p_hwfn, _i) \
+ for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS; \
+ _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
#endif
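
The hunk above drops the no-op OSAL_INLINE stubs and adds a declaration for ecore_iov_get_next_active_vf() together with the ecore_for_each_vf() iterator macro. A minimal, hypothetical sketch of how a caller could use the new iterator follows; the helper name is illustrative and relies only on the macro and the MAX_NUM_VFS return convention shown above.

/* Hypothetical caller, not part of the patch: counts active VFs by
 * walking the iterator until it returns MAX_NUM_VFS.
 */
static u16 example_count_active_vfs(struct ecore_hwfn *p_hwfn)
{
	u16 count = 0;
	u16 i;

	ecore_for_each_vf(p_hwfn, i)
		count++;

	return count;
}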
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index dd53ea9c..aad90123 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -10,106 +10,180 @@
#define __IRO_H__
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
-(IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) (IRO[2].base + \
+ ((port_id) * IRO[2].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
-(IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[3].base + \
+ ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
-(IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
-(IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) (IRO[6].base + \
+ ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
-(IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + \
+ ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[14].base + \
+ ((core_rx_queue_id) * IRO[14].m1))
+#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
+/* Pstorm LiteL2 queue statistics */
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
-(IRO[17].base + ((stat_counter_id) * IRO[17].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
-/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) \
-(IRO[18].base + ((queue_id) * IRO[18].m1))
-#define MSTORM_PRODS_SIZE (IRO[18].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[18].base + \
+ ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[19].base + \
+ ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
+ * mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) (IRO[20].base + \
+ ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
 /* TPA aggregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[22].base + ((pf_id) * IRO[22].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
-(IRO[20].base + ((stat_counter_id) * IRO[20].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
-/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
-(IRO[21].base + ((queue_id) * IRO[21].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[23].base + \
+ ((stat_counter_id) * IRO[23].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
-(IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + \
+ ((stat_counter_id) * IRO[25].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[26].base + ((pf_id) * IRO[26].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[27].base + \
+ ((ethType_id) * IRO[27].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
-(IRO[24].base + ((pf_id) * IRO[24].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
-/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
-(IRO[25].base + ((queue_id) * IRO[25].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[30].base + \
+ ((queue_id) * IRO[30].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
-(IRO[26].base + ((rss_id) * IRO[26].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[31].base + \
+ ((rss_id) * IRO[31].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
-(IRO[27].base + ((rss_id) * IRO[27].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
+ ((rss_id) * IRO[32].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
-(IRO[28].base + ((pf_id) * IRO[28].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[33].base + \
+ ((pf_id) * IRO[33].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
-(IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-(IRO[30].base + ((func_id) * IRO[30].m1) + ((bdq_id) * IRO[30].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[30].size)
-/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
-(IRO[31].base + ((rq_queue_id) * IRO[31].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[31].size)
-/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
-(IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[34].base + \
+ ((cmdq_queue_id) * IRO[34].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[35].base + \
+ ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
+ ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[37].base + \
+ ((pf_id) * IRO[37].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
+ ((pf_id) * IRO[38].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
+ ((pf_id) * IRO[39].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[40].base + \
+ ((pf_id) * IRO[40].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
+ ((pf_id) * IRO[41].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
+ ((pf_id) * IRO[42].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[43].base + \
+ ((pf_id) * IRO[43].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[44].base + \
+ ((pf_id) * IRO[44].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
+ ((rdma_stat_counter_id) * IRO[46].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
#endif /* __IRO_H__ */
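
Although the IRO indices and line layout change, every *_OFFSET macro in this header still resolves to the same firmware-array pattern: IRO[n].base plus the index (or indices) scaled by IRO[n].m1/m2, with the companion *_SIZE macro reporting IRO[n].size. A purely illustrative helper for the single-index case, assuming the iro struct's base/m1 fields as used by the macros above:

/* Illustrative only, not part of the patch. */
static u32 example_iro_offset(const struct iro *p_iro, u32 idx)
{
	return p_iro->base + idx * p_iro->m1;
}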
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index c818b580..43e01e47 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,51 +9,101 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[44] = {
- {0x0, 0x0, 0x0, 0x0, 0x8},
- {0x4db0, 0x60, 0x0, 0x0, 0x60},
- {0x6418, 0x20, 0x0, 0x0, 0x20},
- {0x500, 0x8, 0x0, 0x0, 0x4},
- {0x480, 0x8, 0x0, 0x0, 0x4},
- {0x0, 0x8, 0x0, 0x0, 0x2},
- {0x80, 0x8, 0x0, 0x0, 0x2},
- {0x4938, 0x0, 0x0, 0x0, 0x78},
- {0x3df0, 0x0, 0x0, 0x0, 0x78},
- {0x29b0, 0x0, 0x0, 0x0, 0x78},
- {0x4d38, 0x0, 0x0, 0x0, 0x78},
- {0x56c8, 0x0, 0x0, 0x0, 0x78},
- {0x7e48, 0x0, 0x0, 0x0, 0x78},
- {0xa28, 0x8, 0x0, 0x0, 0x8},
- {0x61f8, 0x10, 0x0, 0x0, 0x10},
- {0xb500, 0x30, 0x0, 0x0, 0x30},
- {0x95b8, 0x30, 0x0, 0x0, 0x30},
- {0x5898, 0x40, 0x0, 0x0, 0x40},
- {0x1f8, 0x10, 0x0, 0x0, 0x8},
- {0xa228, 0x0, 0x0, 0x0, 0x4},
- {0x8050, 0x40, 0x0, 0x0, 0x30},
- {0xcf8, 0x8, 0x0, 0x0, 0x8},
- {0x2b48, 0x80, 0x0, 0x0, 0x38},
- {0xadf0, 0x0, 0x0, 0x0, 0xf0},
- {0xaee0, 0x8, 0x0, 0x0, 0x8},
- {0x80, 0x8, 0x0, 0x0, 0x8},
- {0xac0, 0x8, 0x0, 0x0, 0x8},
- {0x2578, 0x8, 0x0, 0x0, 0x8},
- {0x24f8, 0x8, 0x0, 0x0, 0x8},
- {0x0, 0x8, 0x0, 0x0, 0x8},
- {0x200, 0x10, 0x8, 0x0, 0x8},
- {0x17f8, 0x8, 0x0, 0x0, 0x2},
- {0x19f8, 0x10, 0x8, 0x0, 0x2},
- {0xd988, 0x38, 0x0, 0x0, 0x24},
- {0x11040, 0x10, 0x0, 0x0, 0x8},
- {0x11670, 0x38, 0x0, 0x0, 0x18},
- {0xaeb8, 0x30, 0x0, 0x0, 0x10},
- {0x86f8, 0x28, 0x0, 0x0, 0x18},
- {0xebf8, 0x10, 0x0, 0x0, 0x10},
- {0xde08, 0x40, 0x0, 0x0, 0x30},
- {0x121a0, 0x38, 0x0, 0x0, 0x8},
- {0xf060, 0x20, 0x0, 0x0, 0x20},
- {0x2b80, 0x80, 0x0, 0x0, 0x10},
- {0x50a0, 0x10, 0x0, 0x0, 0x10},
+static const struct iro iro_arr[47] = {
+/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
+ { 0x0, 0x0, 0x0, 0x0, 0x8},
+/* TSTORM_PORT_STAT_OFFSET(port_id) */
+ { 0x4cb0, 0x78, 0x0, 0x0, 0x78},
+/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
+ { 0x6318, 0x20, 0x0, 0x0, 0x20},
+/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
+ { 0xb00, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
+ { 0xa80, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_EQE_CONS_OFFSET(pf_id) */
+ { 0x0, 0x8, 0x0, 0x0, 0x2},
+/* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) */
+ { 0x80, 0x8, 0x0, 0x0, 0x4},
+/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
+ { 0x84, 0x8, 0x0, 0x0, 0x2},
+/* XSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4bc0, 0x0, 0x0, 0x0, 0x78},
+/* YSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x3df0, 0x0, 0x0, 0x0, 0x78},
+/* PSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x29b0, 0x0, 0x0, 0x0, 0x78},
+/* TSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4c38, 0x0, 0x0, 0x0, 0x78},
+/* MSTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x4990, 0x0, 0x0, 0x0, 0x78},
+/* USTORM_INTEG_TEST_DATA_OFFSET */
+ { 0x7e48, 0x0, 0x0, 0x0, 0x78},
+/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
+ { 0xa28, 0x8, 0x0, 0x0, 0x8},
+/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
+ { 0x60f8, 0x10, 0x0, 0x0, 0x10},
+/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
+ { 0xb820, 0x30, 0x0, 0x0, 0x30},
+/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
+ { 0x95b8, 0x30, 0x0, 0x0, 0x30},
+/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x4b60, 0x80, 0x0, 0x0, 0x40},
+/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
+ { 0x1f8, 0x4, 0x0, 0x0, 0x4},
+/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
+ { 0x53a0, 0x80, 0x4, 0x0, 0x4},
+/* MSTORM_TPA_TIMEOUT_US_OFFSET */
+ { 0xc8f0, 0x0, 0x0, 0x0, 0x4},
+/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0x4ba0, 0x80, 0x0, 0x0, 0x20},
+/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x8050, 0x40, 0x0, 0x0, 0x30},
+/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0xe770, 0x60, 0x0, 0x0, 0x60},
+/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
+ { 0x2b48, 0x80, 0x0, 0x0, 0x38},
+/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
+ { 0xf188, 0x78, 0x0, 0x0, 0x78},
+/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
+ { 0x1f8, 0x4, 0x0, 0x0, 0x4},
+/* TSTORM_ETH_PRS_INPUT_OFFSET */
+ { 0xacf0, 0x0, 0x0, 0x0, 0xf0},
+/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
+ { 0xade0, 0x8, 0x0, 0x0, 0x8},
+/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
+ { 0x1f8, 0x8, 0x0, 0x0, 0x8},
+/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
+ { 0xac0, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
+ { 0x2578, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
+ { 0x24f8, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
+ { 0x0, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
+ { 0x200, 0x10, 0x8, 0x0, 0x8},
+/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
+ { 0xb78, 0x10, 0x8, 0x0, 0x2},
+/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0xd888, 0x38, 0x0, 0x0, 0x24},
+/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0x12c38, 0x10, 0x0, 0x0, 0x8},
+/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
+ { 0x11aa0, 0x38, 0x0, 0x0, 0x18},
+/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0xa8c0, 0x30, 0x0, 0x0, 0x10},
+/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0x86f8, 0x28, 0x0, 0x0, 0x18},
+/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
+ { 0x101f8, 0x10, 0x0, 0x0, 0x10},
+/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
+ { 0xdd08, 0x48, 0x0, 0x0, 0x38},
+/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
+ { 0x10660, 0x20, 0x0, 0x0, 0x20},
+/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
+ { 0x2b80, 0x80, 0x0, 0x0, 0x10},
+/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
+ { 0x5000, 0x10, 0x0, 0x0, 0x10},
};
#endif /* __IRO_VALUES_H__ */
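
The per-entry comments added above make the renumbering auditable: each iro_arr entry is listed as { base, m1, m2, m3, size } directly under the macro it backs. A worked example using values from this patch (arithmetic added here for illustration only):

/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) uses IRO[20] =
 * { 0x53a0, 0x80, 0x4, 0x0, 0x4 }, so the producer for VF 3, queue 2 is
 *     0x53a0 + 3 * 0x80 + 2 * 0x4 = 0x5528
 * and MSTORM_ETH_VF_PRODS_SIZE reports a 4-byte entry.
 */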
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 9e6ef5a4..74f61b00 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -35,9 +35,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
{
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
u16 rx_mode = 0;
rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
@@ -84,6 +84,8 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
+ p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
break;
@@ -97,6 +99,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_switching_en = 0;
#endif
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
p_params->concrete_fid);
@@ -202,10 +207,12 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
- struct ecore_filter_accept_flags flags)
+ struct ecore_filter_accept_flags accept_flags)
{
- p_ramrod->common.update_rx_mode_flg = flags.update_rx_mode_config;
- p_ramrod->common.update_tx_mode_flg = flags.update_tx_mode_config;
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
#ifndef ASIC_ONLY
/* On B0 emulation we cannot enable Tx, since this would cause writes
@@ -220,74 +227,55 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
/* Set Rx mode accept flags */
if (p_ramrod->common.update_rx_mode_flg) {
- __le16 *state = &p_ramrod->rx_mode.state;
- u8 accept_filter = flags.rx_accept_filter;
-
-/*
- * SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
- * !!(accept_filter & ECORE_ACCEPT_NONE));
- */
-
- SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
- SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
!(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
- !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+ !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
- SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
!!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
-/*
- * SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
- * !!(accept_filter & ECORE_ACCEPT_NONE));
- */
- SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
!(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
!!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
- SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
!!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
- SET_FIELD(*state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & ECORE_ACCEPT_BCAST));
+ p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"p_ramrod->rx_mode.state = 0x%x\n",
- p_ramrod->rx_mode.state);
+ state);
}
/* Set Tx mode accept flags */
if (p_ramrod->common.update_tx_mode_flg) {
- __le16 *state = &p_ramrod->tx_mode.state;
- u8 accept_filter = flags.tx_accept_filter;
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
- SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & ECORE_ACCEPT_NONE));
- SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & ECORE_ACCEPT_NONE));
- SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
!!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));
- SET_FIELD(*state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & ECORE_ACCEPT_BCAST));
- /* @DPDK */
- /* ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL and
- * ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL
- * needs to be set for VF-VF communication to work
- * when dest macaddr is unknown.
- */
- SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));
+ p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"p_ramrod->tx_mode.state = 0x%x\n",
- p_ramrod->tx_mode.state);
+ state);
}
}
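
The rework above stops doing SET_FIELD() read-modify-writes directly on the little-endian ramrod field and instead builds the bitmap in a host-order local, converting once at the end. A minimal sketch of the resulting pattern, using only names that appear in the hunk (the helper itself is hypothetical):

/* Hypothetical helper, not part of the patch: fill rx_mode.state the way
 * the reworked code does - accumulate in host order, convert once.
 */
static void example_fill_rx_mode(struct vport_update_ramrod_data *p_ramrod,
				 u8 accept_filter)
{
	u16 state = 0;

	SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
		  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));
	SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
		  !!(accept_filter & ECORE_ACCEPT_BCAST));

	/* single endian conversion instead of repeated __le16 updates */
	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
}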
@@ -351,12 +339,12 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_spq_comp_cb *p_comp_data)
{
struct ecore_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct ecore_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
- struct ecore_sp_init_data init_data;
u8 abs_vport_id = 0, val;
- u16 wordval;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
if (IS_VF(p_hwfn->p_dev)) {
rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
@@ -382,30 +370,31 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
/* Copy input params to ramrod according to FW struct */
p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
- p_ramrod->common.vport_id = abs_vport_id;
+ p_cmn->vport_id = abs_vport_id;
+
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
- p_ramrod->common.rx_active_flg = p_params->vport_active_rx_flg;
- p_ramrod->common.tx_active_flg = p_params->vport_active_tx_flg;
- val = p_params->update_vport_active_rx_flg;
- p_ramrod->common.update_rx_active_flg = val;
- val = p_params->update_vport_active_tx_flg;
- p_ramrod->common.update_tx_active_flg = val;
+ p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
- p_ramrod->common.update_inner_vlan_removal_en_flg = val;
- val = p_params->inner_vlan_removal_flg;
- p_ramrod->common.inner_vlan_removal_en = val;
- val = p_params->silent_vlan_removal_flg;
- p_ramrod->common.silent_vlan_removal_en = val;
- val = p_params->update_tx_switching_flg;
- p_ramrod->common.update_tx_switching_en_flg = val;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
val = p_params->update_default_vlan_enable_flg;
- p_ramrod->common.update_default_vlan_en_flg = val;
- p_ramrod->common.default_vlan_en = p_params->default_vlan_enable_flg;
- val = p_params->update_default_vlan_flg;
- p_ramrod->common.update_default_vlan_flg = val;
- wordval = p_params->default_vlan;
- p_ramrod->common.default_vlan = OSAL_CPU_TO_LE16(wordval);
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
@@ -419,17 +408,15 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
p_ramrod->common.update_tx_switching_en_flg = 1;
}
#endif
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
val = p_params->update_anti_spoofing_en_flg;
p_ramrod->common.update_anti_spoofing_en_flg = val;
- p_ramrod->common.anti_spoofing_en = p_params->anti_spoofing_en;
- p_ramrod->common.accept_any_vlan = p_params->accept_any_vlan;
- val = p_params->update_accept_any_vlan_flg;
- p_ramrod->common.update_accept_any_vlan_flg = val;
rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc != ECORE_SUCCESS) {
- /* Return spq entry which is taken in ecore_sp_init_request() */
+ /* Return spq entry which is taken in ecore_sp_init_request()*/
ecore_spq_return_entry(p_hwfn, p_ent);
return rc;
}
@@ -440,6 +427,11 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
p_params->sge_tpa_params);
+ if (p_params->mtu) {
+ p_ramrod->common.update_mtu_flg = 1;
+ p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+ }
+
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
@@ -449,8 +441,8 @@ enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
struct vport_stop_ramrod_data *p_ramrod;
struct ecore_sp_init_data init_data;
struct ecore_spq_entry *p_ent;
- enum _ecore_status_t rc;
u8 abs_vport_id = 0;
+ enum _ecore_status_t rc;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_vport_stop(p_hwfn);
@@ -499,20 +491,20 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
- struct ecore_sp_vport_update_params update_params;
+ struct ecore_sp_vport_update_params vport_update_params;
int i, rc;
/* Prepare and send the vport rx_mode change */
- OSAL_MEMSET(&update_params, 0, sizeof(update_params));
- update_params.vport_id = vport;
- update_params.accept_flags = accept_flags;
- update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
- update_params.accept_any_vlan = accept_any_vlan;
+ OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+ vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ vport_update_params.accept_any_vlan = accept_any_vlan;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
if (IS_VF(p_dev)) {
rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
@@ -521,7 +513,7 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
continue;
}
- rc = ecore_sp_vport_update(p_hwfn, &update_params,
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
@@ -556,39 +548,38 @@ enum _ecore_status_t
ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- u16 rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod)
{
- struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
+ struct ecore_hw_cid_data *p_rx_cid;
u16 abs_rx_q_id = 0;
u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Store information for the stop */
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
p_rx_cid->cid = cid;
p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = vport_id;
+ p_rx_cid->vport_id = p_params->vport_id;
- rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
if (rc != ECORE_SUCCESS)
return rc;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, rx_queue_id, vport_id, sb);
+ opaque_fid, cid, p_params->queue_id,
+ p_params->vport_id, p_params->sb);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -604,10 +595,10 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
- p_ramrod->sb_index = sb_index;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+ p_ramrod->sb_index = (u8)p_params->sb_idx;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->stats_counter_id = p_params->stats_id;
p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
@@ -618,73 +609,82 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+ if (p_params->vf_qid || b_use_zone_a_prod) {
+ p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_use_zone_a_prod ? " [legacy]" : "",
+ p_params->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ }
+
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod)
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod)
{
- struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
- u8 abs_stats_id = 0;
+ struct ecore_hw_cid_data *p_rx_cid;
+ u32 init_prod_val = 0;
u16 abs_l2_queue = 0;
+ u8 abs_stats_id = 0;
enum _ecore_status_t rc;
- u64 init_prod_val = 0;
if (IS_VF(p_hwfn->p_dev)) {
return ecore_vf_pf_rxq_start(p_hwfn,
- rx_queue_id,
- sb,
- sb_index,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size, pp_prod);
}
- rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+ rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
if (rc != ECORE_SUCCESS)
return rc;
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_PRODS_OFFSET(abs_l2_queue);
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid);
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
+ rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &p_rx_cid->cid);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
return rc;
}
p_rx_cid->b_cid_allocated = true;
+ p_params->stats_id = abs_stats_id;
+ p_params->vf_qid = 0;
rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
opaque_fid,
p_rx_cid->cid,
- rx_queue_id,
- vport_id,
- abs_stats_id,
- sb,
- sb_index,
+ p_params,
bd_max_bytes,
bd_chain_phys_addr,
- cqe_pbl_addr, cqe_pbl_size);
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ false);
if (rc != ECORE_SUCCESS)
ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
@@ -703,10 +703,10 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
{
struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
struct ecore_hw_cid_data *p_rx_cid;
u16 qid, abs_rx_q_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 i;
if (IS_VF(p_hwfn->p_dev))
@@ -758,9 +758,9 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
u16 abs_rx_q_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
@@ -788,7 +788,7 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
* In addition, VFs require the answer to come as eqe to PF.
*/
p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) &&
+ p_hwfn->hw_info.opaque_fid) &&
!eq_completion_only) || cqe_completion;
p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
p_hwfn->hw_info.opaque_fid) ||
@@ -806,33 +806,30 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
- u16 tx_queue_id,
u32 cid,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
u16 pbl_size,
union ecore_qm_pq_params *p_pq_params)
{
- struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
+ struct ecore_hw_cid_data *p_tx_cid;
u16 pq_id, abs_tx_q_id = 0;
u8 abs_vport_id;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Store information for the stop */
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
p_tx_cid->cid = cid;
p_tx_cid->opaque_fid = opaque_fid;
- rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+ rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
if (rc != ECORE_SUCCESS)
return rc;
@@ -851,15 +848,14 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
- p_ramrod->sb_index = sb_index;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+ p_ramrod->sb_index = (u8)p_params->sb_idx;
+ p_ramrod->stats_counter_id = p_params->stats_id;
p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
- p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr);
- p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
@@ -867,37 +863,40 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u16 tx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell)
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell)
{
- struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ struct ecore_hw_cid_data *p_tx_cid;
union ecore_qm_pq_params pq_params;
- enum _ecore_status_t rc;
u8 abs_stats_id = 0;
+ enum _ecore_status_t rc;
if (IS_VF(p_hwfn->p_dev)) {
return ecore_vf_pf_txq_start(p_hwfn,
- tx_queue_id,
- sb,
- sb_index,
- pbl_addr, pbl_size, pp_doorbell);
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
+ pbl_addr,
+ pbl_size,
+ pp_doorbell);
}
- rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
if (rc != ECORE_SUCCESS)
return rc;
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
+ pq_params.eth.tc = tc;
+
/* Allocate a CID for the queue */
rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
if (rc != ECORE_SUCCESS) {
@@ -908,18 +907,19 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid, tx_queue_id, vport_id, sb);
+ opaque_fid, p_tx_cid->cid, p_params->queue_id,
+ p_params->vport_id, p_params->sb);
+
+ p_params->stats_id = abs_stats_id;
/* TODO - set tc in the pq_params for multi-cos */
rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
opaque_fid,
- tx_queue_id,
p_tx_cid->cid,
- vport_id,
- abs_stats_id,
- sb,
- sb_index,
- pbl_addr, pbl_size, &pq_params);
+ p_params,
+ pbl_addr,
+ pbl_size,
+ &pq_params);
*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
@@ -939,10 +939,9 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
u16 tx_queue_id)
{
struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
- struct tx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
@@ -959,8 +958,6 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- p_ramrod = &p_ent->ramrod.tx_queue_stop;
-
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1011,8 +1008,8 @@ ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
- struct vport_filter_update_ramrod_data *p_ramrod;
u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
struct eth_filter_cmd *p_first_filter;
struct eth_filter_cmd *p_second_filter;
struct ecore_sp_init_data init_data;
@@ -1243,7 +1240,7 @@ static u32 ecore_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static OSAL_INLINE u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
{
u32 packet_buf[2] = { 0 };
@@ -1266,18 +1263,22 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
- struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc;
u8 abs_vport_id = 0;
+ enum _ecore_status_t rc;
int i;
- rc = ecore_fw_vport(p_hwfn,
- (p_filter_cmd->opcode == ECORE_FILTER_ADD) ?
- p_filter_cmd->vport_to_add_to :
- p_filter_cmd->vport_to_remove_from, &abs_vport_id);
+ if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
+ rc = ecore_fw_vport(p_hwfn,
+ p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ else
+ rc = ecore_fw_vport(p_hwfn,
+ p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1304,11 +1305,10 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
0, sizeof(p_ramrod->approx_mcast.bins));
OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
ETH_MULTICAST_MAC_BINS_IN_REGS);
-
+ /* filter ADD op is explicit set op and it removes
+ * any existing filters for the vport.
+ */
if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
- /* filter ADD op is explicit set op and it removes
- * any existing filters for the vport.
- */
for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
u32 bit;
@@ -1353,14 +1353,16 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u16 opaque_fid;
if (IS_VF(p_dev)) {
ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_eth_filter_mcast(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
+ opaque_fid,
p_filter_cmd,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS)
@@ -1381,14 +1383,16 @@ ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ u16 opaque_fid;
if (IS_VF(p_dev)) {
rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
continue;
}
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_eth_filter_ucast(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
+ opaque_fid,
p_filter_cmd,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS)
@@ -1398,77 +1402,6 @@ ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
return rc;
}
-/* IOV related */
-enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid)
-{
- struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
- struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
- struct ecore_sp_init_data init_data;
-
- /* Get SPQ entry */
- OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = ecore_spq_get_cid(p_hwfn);
- init_data.opaque_fid = opaque_vfid;
- init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
-
- rc = ecore_sp_init_request(p_hwfn, &p_ent,
- COMMON_RAMROD_VF_START,
- PROTOCOLID_COMMON, &init_data);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- p_ramrod = &p_ent->ramrod.vf_start;
-
- p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
- p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(opaque_vfid);
-
- switch (p_hwfn->hw_info.personality) {
- case ECORE_PCI_ETH:
- p_ramrod->personality = PERSONALITY_ETH;
- break;
- default:
- DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
- p_hwfn->hw_info.personality);
- return ECORE_INVAL;
- }
-
- return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
-}
-
-enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn)
-{
- return ECORE_NOTIMPL;
-}
-
-enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid)
-{
- enum _ecore_status_t rc = ECORE_NOTIMPL;
- struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
- struct ecore_spq_entry *p_ent = OSAL_NULL;
- struct ecore_sp_init_data init_data;
-
- /* Get SPQ entry */
- OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = ecore_spq_get_cid(p_hwfn);
- init_data.opaque_fid = opaque_vfid;
- init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
-
- rc = ecore_sp_init_request(p_hwfn, &p_ent,
- COMMON_RAMROD_VF_STOP,
- PROTOCOLID_COMMON, &init_data);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- p_ramrod = &p_ent->ramrod.vf_stop;
-
- p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
-
- return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
-}
-
/* Statistics related code */
static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
u32 *p_addr, u32 *p_len,
@@ -1636,51 +1569,51 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
- p_stats->rx_crc_errors += port_stats.pmm.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- p_stats->rx_pause_frames += port_stats.pmm.rxpf;
- p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
- p_stats->rx_align_errors += port_stats.pmm.raln;
- p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
- p_stats->rx_oversize_packets += port_stats.pmm.rovr;
- p_stats->rx_jabbers += port_stats.pmm.rjbr;
- p_stats->rx_undersize_packets += port_stats.pmm.rund;
- p_stats->rx_fragments += port_stats.pmm.rfrg;
- p_stats->tx_64_byte_packets += port_stats.pmm.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- p_stats->tx_pause_frames += port_stats.pmm.txpf;
- p_stats->tx_pfc_frames += port_stats.pmm.txpp;
- p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- p_stats->tx_total_collisions += port_stats.pmm.tncl;
- p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ p_stats->rx_64_byte_packets += port_stats.eth.r64;
+ p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+ p_stats->rx_crc_errors += port_stats.eth.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_stats->rx_pause_frames += port_stats.eth.rxpf;
+ p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+ p_stats->rx_align_errors += port_stats.eth.raln;
+ p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr;
+ p_stats->rx_jabbers += port_stats.eth.rjbr;
+ p_stats->rx_undersize_packets += port_stats.eth.rund;
+ p_stats->rx_fragments += port_stats.eth.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.eth.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+ p_stats->tx_pause_frames += port_stats.eth.txpf;
+ p_stats->tx_pfc_frames += port_stats.eth.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+ p_stats->tx_total_collisions += port_stats.eth.tncl;
+ p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
p_stats->brb_discards += port_stats.brb.brb_discard[j];
@@ -1737,7 +1670,7 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
IS_PF(p_dev) ? true : false);
out:
- if (IS_PF(p_dev))
+ if (IS_PF(p_dev) && p_ptt)
ecore_ptt_release(p_hwfn, p_ptt);
}
}
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index b0850ca4..9c1bd388 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -9,62 +9,13 @@
#ifndef __ECORE_L2_H__
#define __ECORE_L2_H__
+
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_spq.h"
#include "ecore_l2_api.h"
/**
- * @brief ecore_sp_vf_start - VF Function Start
- *
- * This ramrod is sent to initialize a virtual function (VF) is loaded.
- * It will configure the function related parameters.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- * @param concrete_vfid VF ID
- * @param opaque_vfid
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid);
-
-/**
- * @brief ecore_sp_vf_update - VF Function Update Ramrod
- *
- * This ramrod performs updates of a virtual function (VF).
- * It currently contains no functionality.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn);
-
-/**
- * @brief ecore_sp_vf_stop - VF Function Stop Ramrod
- *
- * This ramrod is sent to unload a virtual function (VF).
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- * @param concrete_vfid
- * @param opaque_vfid
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
- u32 concrete_vfid, u16 opaque_vfid);
-
-/**
* @brief ecore_sp_eth_tx_queue_update -
*
* This ramrod updates a TX queue. It is used for setting the active
@@ -89,31 +40,25 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param opaque_fid
* @param cid
- * @param rx_queue_id
- * @param vport_id
- * @param stats_id
- * @param sb
- * @param sb_index
+ * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
+ * stats_id is absolute, packed in p_params.
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
- * @param leading
+ * @param b_use_zone_a_prod - support legacy VF producers
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- u16 rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, bool b_use_zone_a_prod);
/**
* @brief - Starts a Tx queue; Should be used where contexts are handled
@@ -121,12 +66,8 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param opaque_fid
- * @param tx_queue_id
* @param cid
- * @param vport_id
- * @param stats_id
- * @param sb
- * @param sb_index
+ * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
@@ -134,14 +75,10 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
- u16 tx_queue_id,
u32 cid,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
u16 pbl_size,
union ecore_qm_pq_params *p_pq_params);
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index b41dd7fe..326fa45b 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -14,19 +14,31 @@
#ifndef __EXTRACT__LINUX__
enum ecore_rss_caps {
- ECORE_RSS_IPV4 = 0x1,
- ECORE_RSS_IPV6 = 0x2,
- ECORE_RSS_IPV4_TCP = 0x4,
- ECORE_RSS_IPV6_TCP = 0x8,
- ECORE_RSS_IPV4_UDP = 0x10,
- ECORE_RSS_IPV6_UDP = 0x20,
+ ECORE_RSS_IPV4 = 0x1,
+ ECORE_RSS_IPV6 = 0x2,
+ ECORE_RSS_IPV4_TCP = 0x4,
+ ECORE_RSS_IPV6_TCP = 0x8,
+ ECORE_RSS_IPV4_UDP = 0x10,
+ ECORE_RSS_IPV6_UDP = 0x20,
};
/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
-#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
+#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif
+struct ecore_queue_start_common_params {
+ /* Rx/Tx queue id */
+ u8 queue_id;
+ u8 vport_id;
+
+ /* stats_id is relative or absolute depending on the function */
+ u8 stats_id;
+ u16 sb;
+ u16 sb_idx;
+ u16 vf_qid;
+};
+
struct ecore_rss_params {
u8 update_rss_config;
u8 rss_enable;
@@ -35,7 +47,7 @@ struct ecore_rss_params {
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
- u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
u32 rss_key[ECORE_RSS_KEY_SIZE];
};
@@ -63,8 +75,8 @@ enum ecore_filter_opcode {
ECORE_FILTER_ADD,
ECORE_FILTER_REMOVE,
ECORE_FILTER_MOVE,
- ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
- ECORE_FILTER_FLUSH, /* Removes all filters */
+ ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
+ ECORE_FILTER_FLUSH, /* Removes all filters */
};
enum ecore_filter_ucast_type {
@@ -97,7 +109,7 @@ struct ecore_filter_mcast {
enum ecore_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
- u8 num_mc_addrs;
+ u8 num_mc_addrs;
#define ECORE_MAX_MC_ADDRS 64
unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};
@@ -137,13 +149,14 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
/* Set "accept" filters */
enum _ecore_status_t
-ecore_filter_accept_cmd(struct ecore_dev *p_dev,
- u8 vport,
- struct ecore_filter_accept_flags accept_flags,
- u8 update_accept_any_vlan,
- u8 accept_any_vlan,
- enum spq_mode comp_mode,
- struct ecore_spq_comp_cb *p_comp_data);
+ecore_filter_accept_cmd(
+ struct ecore_dev *p_dev,
+ u8 vport,
+ struct ecore_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
/**
* @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
@@ -153,14 +166,7 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
*
* @param p_hwfn
* @param opaque_fid
- * @param rx_queue_id RX Queue ID: Zero based, per VPort, allocated
- * by assignment (=rssId)
- * @param vport_id VPort ID
- * @param u8 stats_id VPort ID which the queue stats
- * will be added to
- * @param sb Status Block of the Function Event Ring
- * @param sb_index Index into the status block of the
- * Function Event Ring
+ * @param p_params [stats_id is relative, packed in p_params]
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
@@ -171,18 +177,15 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod);
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod);
/**
* @brief ecore_sp_eth_rx_queue_stop -
@@ -204,7 +207,8 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion);
+ bool eq_completion_only,
+ bool cqe_completion);
/**
* @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
@@ -214,32 +218,24 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param opaque_fid
- * @param tx_queue_id TX Queue ID
- * @param vport_id VPort ID
- * @param stats_id VPort ID which the queue stats
- * will be added to
- * @param sb Status Block of the Function Event Ring
- * @param sb_index Index into the status block of the Function
- * Event Ring
+ * @param p_params
+ * @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
* @param pp_doorbell Pointer to place doorbell pointer (May be NULL).
- * This address should be used with the
+ * This address should be used with the
* DIRECT_REG_WR macro.
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u16 tx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM * *
- pp_doorbell);
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell);
/**
* @brief ecore_sp_eth_tx_queue_stop -
@@ -255,7 +251,7 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
u16 tx_queue_id);
-enum ecore_tpa_mode {
+enum ecore_tpa_mode {
ECORE_TPA_MODE_NONE,
ECORE_TPA_MODE_RSC,
ECORE_TPA_MODE_GRO,
@@ -275,6 +271,8 @@ struct ecore_sp_vport_start_params {
u8 vport_id; /* VPORT ID */
u16 mtu; /* VPORT MTU */
bool zero_placement_offset;
+ bool check_mac;
+ bool check_ethtype;
};
/**
@@ -293,30 +291,34 @@ ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
struct ecore_sp_vport_update_params {
- u16 opaque_fid;
- u8 vport_id;
- u8 update_vport_active_rx_flg;
- u8 vport_active_rx_flg;
- u8 update_vport_active_tx_flg;
- u8 vport_active_tx_flg;
- u8 update_inner_vlan_removal_flg;
- u8 inner_vlan_removal_flg;
- u8 silent_vlan_removal_flg;
- u8 update_default_vlan_enable_flg;
- u8 default_vlan_enable_flg;
- u8 update_default_vlan_flg;
- u16 default_vlan;
- u8 update_tx_switching_flg;
- u8 tx_switching_flg;
- u8 update_approx_mcast_flg;
- u8 update_anti_spoofing_en_flg;
- u8 anti_spoofing_en;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- unsigned long bins[8];
- struct ecore_rss_params *rss_params;
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct ecore_rss_params *rss_params;
struct ecore_filter_accept_flags accept_flags;
struct ecore_sge_tpa_params *sge_tpa_params;
+ /* MTU change - notice this requires the vport to be disabled.
+ * If non-zero, the value will be used.
+ */
+ u16 mtu;
};
/**
@@ -351,7 +353,8 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid, u8 vport_id);
+ u16 opaque_fid,
+ u8 vport_id);
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 9dd2eed3..2ff97155 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -15,6 +15,7 @@
#include "ecore_hw.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sriov.h"
+#include "ecore_vf.h"
#include "ecore_iov_api.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
@@ -135,7 +136,8 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
PUBLIC_DRV_MB));
p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x"
+ " mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
/* Set the MFW MB address */
@@ -201,6 +203,52 @@ err:
return ECORE_NOMEM;
}
+/* Locks the MFW mailbox of a PF to ensure a single access.
+ * The lock is achieved in most cases by holding a spinlock, causing other
+ * threads to wait until a previous access is done.
+ * In some cases (currently when [UN]LOAD_REQ commands are sent), the single
+ * access is achieved by setting a blocking flag, which causes the mailbox
+ * sends of other competing contexts to fail.
+ */
+static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
+ u32 cmd)
+{
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+
+ /* The spinlock shouldn't be acquired when the mailbox command is
+ * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
+ * pending [UN]LOAD_REQ command of another PF together with a spinlock
+ * (i.e. interrupts are disabled) can lead to a deadlock.
+ * It is assumed that for a single PF, no other mailbox commands can be
+ * sent from another context while sending LOAD_REQ, and that any
+ * commands sent in parallel with UNLOAD_REQ can be cancelled.
+ */
+ if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (p_hwfn->mcp_info->block_mb_sending) {
+ DP_NOTICE(p_hwfn, false,
+ "Trying to send a MFW mailbox command [0x%x]"
+ " in parallel to [UN]LOAD_REQ. Aborting.\n",
+ cmd);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ return ECORE_BUSY;
+ }
+
+ if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
+ p_hwfn->mcp_info->block_mb_sending = true;
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
+{
+ if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+}
+
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -214,7 +262,12 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
delay = EMUL_MCP_RESP_ITER_US;
#endif
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
+ /* Ensure that only a single thread is accessing the mailbox at
+ * any given time.
+ */
+ rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ if (rc != ECORE_SUCCESS)
+ return rc;
/* Set drv command along with the updated sequence */
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
@@ -237,12 +290,11 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
rc = ECORE_AGAIN;
}
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
return rc;
}
-/* Should be called while the dedicated spinlock is acquired */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd, u32 param,
@@ -250,12 +302,16 @@ static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
u32 *o_mcp_param)
{
u32 delay = CHIP_MCP_RESP_ITER_US;
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
u32 seq, cnt = 1, actual_mb_seq;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
#endif
/* Get actual driver mailbox sequence */
@@ -279,10 +335,6 @@ static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
/* Set drv command along with the updated sequence */
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "wrote command (%x) to MFW MB param 0x%08x\n",
- (cmd | seq), param);
-
do {
/* Wait for MFW response */
OSAL_UDELAY(delay);
@@ -290,11 +342,7 @@ static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
/* Give the FW up to 5 second (500*10ms) */
} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < ECORE_DRV_MB_MAX_RETRIES));
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "[after %d ms] read (%x) seq is (%x) from FW MB\n",
- cnt * delay, *o_mcp_resp, seq);
+ (cnt++ < max_retries));
/* Is this a reply to our command? */
if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
@@ -312,10 +360,56 @@ static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
return rc;
}
+static enum _ecore_status_t
+ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params)
+{
+ u32 union_data_addr;
+ enum _ecore_status_t rc;
+
+ /* MCP not initialized */
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
+ return ECORE_BUSY;
+ }
+
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+
+ /* Ensure that only a single thread is accessing the mailbox at
+ * any given time.
+ */
+ rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (p_mb_params->p_data_src != OSAL_NULL)
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
+ p_mb_params->p_data_src,
+ sizeof(*p_mb_params->p_data_src));
+
+ rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
+ p_mb_params->param, &p_mb_params->mcp_resp,
+ &p_mb_params->mcp_param);
+
+ if (p_mb_params->p_data_dst != OSAL_NULL)
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr,
+ sizeof(*p_mb_params->p_data_dst));
+
+ ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+
+ return rc;
+}
+
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 cmd, u32 param,
u32 *o_mcp_resp, u32 *o_mcp_param)
{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
@@ -328,44 +422,17 @@ enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
}
#endif
- return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
- o_mcp_resp, o_mcp_param);
-}
-
-enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd, u32 param,
- union drv_union_data *p_union_data,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
-{
- u32 union_data_addr;
- enum _ecore_status_t rc;
-
- /* MCP not initialized */
- if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
- return ECORE_BUSY;
- }
-
- /* Acquiring a spinlock is needed to ensure that only a single thread
- * is accessing the mailbox at a certain time.
- */
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
-
- if (p_union_data != OSAL_NULL) {
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- OFFSETOF(struct public_drv_mb, union_data);
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
- sizeof(*p_union_data));
- }
-
- rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
- o_mcp_param);
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
- return rc;
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
@@ -376,12 +443,23 @@ enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
u32 *o_mcp_param,
u32 i_txn_size, u32 *i_buf)
{
+ struct ecore_mcp_mb_params mb_params;
union drv_union_data union_data;
+ enum _ecore_status_t rc;
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
- return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
- o_mcp_resp, o_mcp_param);
+ return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
@@ -392,30 +470,25 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
u32 *o_mcp_param,
u32 *o_txn_size, u32 *o_buf)
{
+ struct ecore_mcp_mb_params mb_params;
+ union drv_union_data union_data;
enum _ecore_status_t rc;
- u32 i;
- /* MCP not initialized */
- if (!ecore_mcp_is_init(p_hwfn)) {
- DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
- return ECORE_BUSY;
- }
-
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
- rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
- o_mcp_param);
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
- goto out;
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
- /* Get payload after operation completes successfully */
*o_txn_size = *o_mcp_param;
- for (i = 0; i < *o_txn_size; i += 4)
- o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
- union_data.raw_data[i]);
+ OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
-out:
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
- return rc;
+ return ECORE_SUCCESS;
}
#ifndef ASIC_ONLY
@@ -451,8 +524,8 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
u32 *p_load_code)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mcp_mb_params mb_params;
union drv_union_data union_data;
- u32 param;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
@@ -462,12 +535,13 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
}
#endif
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
+ p_dev->drv_type;
OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
-
- rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
- (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- p_dev->drv_type),
- &union_data, p_load_code, &param);
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc != ECORE_SUCCESS) {
@@ -475,6 +549,8 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return rc;
}
+ *p_load_code = mb_params.mcp_resp;
+
/* If MFW refused (e.g. other port is in diagnostic mode) we
* must abort. This can happen in the following cases:
* - Other port is in diagnostic mode
@@ -534,8 +610,8 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
+ struct ecore_mcp_mb_params mb_params;
union drv_union_data union_data;
- u32 resp, param;
enum _ecore_status_t rc;
int i;
@@ -544,13 +620,14 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
"Acking VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
-
+ mb_params.p_data_src = &union_data;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
- DRV_MSG_CODE_VF_DISABLED_DONE, 0,
- &union_data, &resp, &param);
+ &mb_params);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
+ DP_NOTICE(p_hwfn, false,
"Failed to pass ACK for VF flr to MFW\n");
return ECORE_TIMEOUT;
}
@@ -577,23 +654,25 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
"Received transceiver state update [0x%08x] from mfw"
- "[Addr 0x%x]\n",
+ " [Addr 0x%x]\n",
transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
OFFSETOF(struct public_port,
transceiver_data)));
- transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);
+ transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
- if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool b_reset)
+ struct ecore_ptt *p_ptt,
+ bool b_reset)
{
struct ecore_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
u32 status = 0;
p_link = &p_hwfn->mcp_info->link_output;
@@ -657,23 +736,18 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
else
p_link->line_speed = 0;
- /* Correct speed according to bandwidth allocation */
- if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
- u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
-
- __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
- p_link, max_bw);
- }
-
- if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
- u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
- __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
- p_link, min_bw);
+ /* Max bandwidth configuration */
+ __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
- ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
- p_link->min_pf_rate);
- }
+ /* Min bandwidth configuration */
+ __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+ p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
@@ -737,10 +811,11 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool b_up)
{
struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ struct ecore_mcp_mb_params mb_params;
union drv_union_data union_data;
- struct pmm_phy_cfg *p_phy_cfg;
- u32 param = 0, reply = 0, cmd;
+ struct eth_phy_cfg *p_phy_cfg;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 cmd;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
@@ -753,21 +828,11 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
p_phy_cfg->speed = params->speed.forced_speed;
- p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
p_phy_cfg->adv_speed = params->speed.advertised_speeds;
p_phy_cfg->loopback_mode = params->loopback_mode;
-
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- DP_INFO(p_hwfn,
- "Link on FPGA - Ask for loopback mode '5' at 10G\n");
- p_phy_cfg->loopback_mode = 5;
- p_phy_cfg->speed = 10000;
- }
-#endif
-
p_hwfn->b_drv_link_init = b_up;
if (b_up)
@@ -781,8 +846,10 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
else
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
- rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
- &param);
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
if (rc != ECORE_SUCCESS) {
@@ -859,8 +926,9 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
{
enum ecore_mcp_protocol_type stats_type;
union ecore_mcp_protocol_stats stats;
- u32 hsi_param, param = 0, reply = 0;
+ struct ecore_mcp_mb_params mb_params;
union drv_union_data union_data;
+ u32 hsi_param;
switch (type) {
case MFW_DRV_MSG_GET_LAN_STATS:
@@ -874,30 +942,12 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
-
- ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
- hsi_param, &union_data, &reply, &param);
-}
-
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct public_func *p_data, int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
- size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
+ mb_params.p_data_src = &union_data;
+ ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void
@@ -935,6 +985,28 @@ ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
}
}
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data),
+ SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
+}
+
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
@@ -971,6 +1043,154 @@ static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
+static enum _ecore_status_t
+ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 mdump_cmd, union drv_union_data *p_data_src,
+ union drv_union_data *p_data_dst, u32 *p_mcp_resp)
+{
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = mdump_cmd;
+ mb_params.p_data_src = p_data_src;
+ mb_params.p_data_dst = p_data_dst;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = mb_params.mcp_resp;
+ if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_NOTICE(p_hwfn, false,
+ "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
+ mdump_cmd);
+ rc = ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
+ OSAL_NULL, OSAL_NULL, &mcp_resp);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 epoch)
+{
+ union drv_union_data union_data;
+ u32 mcp_resp;
+
+ OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
+ &union_data, OSAL_NULL, &mcp_resp);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
+ OSAL_NULL, OSAL_NULL, &mcp_resp);
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
+ OSAL_NULL, OSAL_NULL, &mcp_resp);
+}
+
+static enum _ecore_status_t
+ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct mdump_config_stc *p_mdump_config)
+{
+ union drv_union_data union_data;
+ u32 mcp_resp;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
+ OSAL_NULL, &union_data, &mcp_resp);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* A zero response implies that the mdump command is not supported */
+ if (!mcp_resp)
+ return ECORE_NOTIMPL;
+
+ if (mcp_resp != FW_MSG_CODE_OK) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
+ mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+
+ OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
+ sizeof(*p_mdump_config));
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct mdump_config_stc mdump_config;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump_config: version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
+ mdump_config.version, mdump_config.config, mdump_config.epoc,
+ mdump_config.num_of_logs, mdump_config.valid_logs);
+
+ if (mdump_config.valid_logs > 0) {
+ DP_NOTICE(p_hwfn, false,
+ "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ }
+
+ return rc;
+}
+
+void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable)
+{
+ p_dev->mdump_en = mdump_enable;
+}
+
+static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
+ return;
+
+ DP_NOTICE(p_hwfn, false,
+ "Received a critical error notification from the MFW!\n");
+
+ if (p_hwfn->p_dev->mdump_en) {
+ DP_NOTICE(p_hwfn, false,
+ "Not acknowledging the notification to allow the MFW crash dump\n");
+ return;
+ }
+
+ ecore_mcp_mdump_ack(p_hwfn, p_ptt);
+ ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
+}
+
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -1014,6 +1234,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
ECORE_DCBX_OPERATIONAL_MIB);
break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_ERROR_RECOVERY:
ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
break;
@@ -1026,12 +1249,12 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_BW_UPDATE:
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
- case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
- ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
- break;
case MFW_DRV_MSG_FAILURE_DETECTED:
ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
default:
/* @DPDK */
DP_NOTICE(p_hwfn, false,
@@ -1064,33 +1287,32 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
return rc;
}
-enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_mfw_ver,
u32 *p_running_bundle_id)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
u32 global_offsize;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_dev)) {
- DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, false, "Emulation - can't get MFW version\n");
return ECORE_SUCCESS;
}
#endif
- if (IS_VF(p_dev)) {
+ if (IS_VF(p_hwfn->p_dev)) {
if (p_hwfn->vf_iov_info) {
struct pfvf_acquire_resp_tlv *p_resp;
p_resp = &p_hwfn->vf_iov_info->acquire_resp;
*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
return ECORE_SUCCESS;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return ECORE_INVAL;
}
-
- DP_VERBOSE(p_dev, ECORE_MSG_IOV,
- "VF requested MFW vers prior to ACQUIRE\n");
- return ECORE_INVAL;
}
global_offsize = ecore_rd(p_hwfn, p_ptt,
@@ -1192,18 +1414,25 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
}
+ /* TODO - are these calculations true for BE machine? */
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
+ (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
+ (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);
+
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
"Read configuration from shmem: pause_on_host %02x"
" protocol %02x BW [%02x - %02x]"
- " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64
- " node %" PRIx64 " ovlan %04x\n",
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %lx"
+ " node %lx ovlan %04x\n",
info->pause_on_host, info->protocol,
info->bandwidth_min, info->bandwidth_max,
info->mac[0], info->mac[1], info->mac[2],
info->mac[3], info->mac[4], info->mac[5],
- info->wwn_port, info->wwn_node, info->ovlan);
+ (unsigned long)info->wwn_port,
+ (unsigned long)info->wwn_node, info->ovlan);
return ECORE_SUCCESS;
}
@@ -1243,14 +1472,14 @@ struct ecore_mcp_link_capabilities
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
rc = ecore_mcp_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
/* Wait for the drain to complete before returning */
- OSAL_MSLEEP(120);
+ OSAL_MSLEEP(1020);
return rc;
}
@@ -1376,6 +1605,12 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
u32 resp = 0, param = 0, rc_param = 0;
enum _ecore_status_t rc;
+/* Only the leader can configure MSIX, and it needs to take CMT into account */
+
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return ECORE_SUCCESS;
+ num *= p_hwfn->p_dev->num_hwfns;
+
param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
@@ -1388,6 +1623,10 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
vf_id);
rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
}
return rc;
@@ -1397,9 +1636,10 @@ enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
{
- u32 param = 0, reply = 0, num_words, i;
struct drv_version_stc *p_drv_version;
+ struct ecore_mcp_mb_params mb_params;
union drv_union_data union_data;
+ u32 num_words, i;
void *p_name;
OSAL_BE32 val;
enum _ecore_status_t rc;
@@ -1418,8 +1658,10 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
- rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
- &union_data, &reply, &param);
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1614,6 +1856,14 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
break;
}
+
+ /* This can be a lengthy process, and the scheduler may not be
+ * preemptible. Sleep a bit to prevent CPU hogging.
+ */
+ if (bytes_left % 0x1000 <
+ (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
+ OSAL_MSLEEP(1);
+
offset += *params.nvm_rd.buf_size;
bytes_left -= *params.nvm_rd.buf_size;
}
@@ -1751,6 +2001,13 @@ enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ /* This can be a lengthy process, and the scheduler may not be
+ * preemptible. Sleep a bit to prevent CPU hogging.
+ */
+ if (buf_idx % 0x1000 >
+ (buf_idx + buf_size) % 0x1000)
+ OSAL_MSLEEP(1);
+
buf_idx += buf_size;
}
@@ -1821,10 +2078,9 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
u32 bytes_left, bytes_to_copy, buf_size;
OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_PORT, port);
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+ params.nvm_common.offset =
+ (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
addr = offset;
offset = 0;
bytes_left = len;
@@ -1835,10 +2091,14 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MAX_I2C_TRANSACTION_SIZE);
params.nvm_rd.buf = (u32 *)(p_buf + offset);
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
+ params.nvm_common.offset &=
+ (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ params.nvm_common.offset |=
+ ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
+ params.nvm_common.offset |=
+ (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
@@ -1864,20 +2124,23 @@ enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
u32 buf_idx, buf_size;
OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_PORT, port);
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
+ params.nvm_common.offset =
+ (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
params.type = ECORE_MCP_NVM_WR;
params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MAX_I2C_TRANSACTION_SIZE);
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
- SET_FIELD(params.nvm_common.offset,
- DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
+ params.nvm_common.offset &=
+ (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ params.nvm_common.offset |=
+ ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
+ params.nvm_common.offset |=
+ (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
params.nvm_wr.buf_size = buf_size;
params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
@@ -1901,11 +2164,14 @@ enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, rsp;
- SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
drv_mb_param, &rsp, gpio_val);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
return ECORE_UNKNOWN_ERROR;
@@ -1919,14 +2185,269 @@ enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, param, rsp;
- SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
- SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
drv_mb_param, &rsp, &param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
return ECORE_UNKNOWN_ERROR;
return ECORE_SUCCESS;
}
+
+enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_direction,
+ u32 *gpio_ctrl)
+{
+ u32 drv_mb_param = 0, rsp, val = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
+ drv_mb_param, &rsp, &val);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
+ DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
+ *gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
+ DRV_MB_PARAM_GPIO_CTRL_SHIFT;
+
+ if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
+ return ECORE_UNKNOWN_ERROR;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *num_images)
+{
+ u32 drv_mb_param = 0, rsp;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, num_images);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att, u32 image_index)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 buf_size;
+
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
+ params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ params.nvm_common.offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
+
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_rd.buf_size = &buf_size;
+ params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
+ params.nvm_rd.buf = (u32 *)p_image_att;
+
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (p_image_att->return_code != 1))
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_temperature_info *p_temp_info)
+{
+ struct ecore_temperature_sensor *p_temp_sensor;
+ struct temperature_status_stc *p_mfw_temp_info;
+ struct ecore_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ u32 val;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
+ mb_params.p_data_dst = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mfw_temp_info = &union_data.temp_info;
+
+ OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
+ p_temp_info->num_sensors = OSAL_MIN_T(u32,
+ p_mfw_temp_info->num_of_sensors,
+ ECORE_MAX_NUM_OF_SENSORS);
+ for (i = 0; i < p_temp_info->num_sensors; i++) {
+ val = p_mfw_temp_info->sensor[i];
+ p_temp_sensor = &p_temp_info->sensors[i];
+ p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
+ SENSOR_LOCATION_SHIFT;
+ p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
+ THRESHOLD_HIGH_SHIFT;
+ p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
+ CRITICAL_TEMPERATURE_SHIFT;
+ p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
+ CURRENT_TEMP_SHIFT;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_mba_versions(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mba_vers *p_mba_vers)
+{
+ struct ecore_mcp_nvm_params params;
+ enum _ecore_status_t rc;
+ u32 buf_size;
+
+ OSAL_MEM_ZERO(&params, sizeof(params));
+ params.type = ECORE_MCP_NVM_RD;
+ params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
+ params.nvm_common.offset = 0;
+ params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
+ params.nvm_rd.buf_size = &buf_size;
+ rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ if (buf_size != MCP_DRV_NVM_BUF_LEN)
+ rc = ECORE_UNKNOWN_ERROR;
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 *num_events)
+{
+ u32 rsp;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MEM_ECC_EVENTS,
+ 0, &rsp, (u32 *)num_events);
+}
+
+#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
+#define ECORE_RESC_ALLOC_VERSION_MINOR 0
+#define ECORE_RESC_ALLOC_VERSION \
+ ((ECORE_RESC_ALLOC_VERSION_MAJOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ (ECORE_RESC_ALLOC_VERSION_MINOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+
+enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct resource_info *p_resc_info,
+ u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+ struct ecore_mcp_mb_params mb_params;
+ union drv_union_data *p_union_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.param = ECORE_RESC_ALLOC_VERSION;
+ p_union_data = (union drv_union_data *)p_resc_info;
+ mb_params.p_data_src = p_union_data;
+ mb_params.p_data_dst = p_union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = mb_params.mcp_resp;
+ *p_mcp_param = mb_params.mcp_param;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
+ " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
+ *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
+ p_resc_info->offset, p_resc_info->vf_size,
+ p_resc_info->vf_offset, p_resc_info->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR,
+ 0, &mcp_resp, &mcp_param);
+}
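
The helpers added above (ecore_mcp_get_temperature_info(), ecore_mcp_get_mba_versions(), ecore_mcp_mem_ecc_events(), ecore_mcp_initiate_pf_flr()) all follow the same pattern: build the request, send it through the MCP mailbox, and map the management-firmware response onto an _ecore_status_t. The sketch below illustrates a caller reading the temperature sensors; it is not part of the patch, the wrapper name is invented for illustration, and the PTT handling assumes the usual ecore_ptt_acquire()/ecore_ptt_release() pairing.

/* Illustrative sketch only -- not part of the imported patch.
 * example_read_temperature() is an assumed caller name.
 */
static enum _ecore_status_t example_read_temperature(struct ecore_hwfn *p_hwfn)
{
	struct ecore_temperature_info temp_info;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;
	u32 i;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_get_temperature_info(p_hwfn, p_ptt, &temp_info);
	if (rc == ECORE_SUCCESS) {
		/* num_sensors is already clamped to ECORE_MAX_NUM_OF_SENSORS */
		for (i = 0; i < temp_info.num_sensors; i++) {
			struct ecore_temperature_sensor *s =
				&temp_info.sensors[i];
			/* s->current_temp, s->threshold_high and s->critical
			 * are now valid for the sensor at s->sensor_location.
			 */
			(void)s;
		}
	}

	ecore_ptt_release(p_hwfn, p_ptt);
	return rc;
}
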
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 448c30bb..831890ca 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -25,23 +25,35 @@
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-/* TODO - this is only correct as long as only BB is supported, and
- * no port-swapping is implemented; Afterwards we'll need to fix it.
- */
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->p_dev->num_ports_in_engines * 2))
+ ((_p_hwfn)->p_dev->num_ports_in_engines * \
+ ecore_device_num_engines((_p_hwfn)->p_dev)))
+
struct ecore_mcp_info {
- osal_spinlock_t lock; /* Spinlock used for accessing MCP mailbox */
- u32 public_base; /* Address of the MCP public area */
- u32 drv_mb_addr; /* Address of the driver mailbox */
- u32 mfw_mb_addr; /* Address of the MFW mailbox */
- u32 port_addr; /* Address of the port configuration (link) */
- u16 drv_mb_seq; /* Current driver mailbox sequence */
- u16 drv_pulse_seq; /* Current driver pulse sequence */
- struct ecore_mcp_link_params link_input;
- struct ecore_mcp_link_state link_output;
+ /* Spinlock used for protecting the access to the MFW mailbox */
+ osal_spinlock_t lock;
+ /* Flag to indicate whether sending a MFW mailbox is forbidden */
+ bool block_mb_sending;
+
+ /* Address of the MCP public area */
+ u32 public_base;
+ /* Address of the driver mailbox */
+ u32 drv_mb_addr;
+ /* Address of the MFW mailbox */
+ u32 mfw_mb_addr;
+ /* Address of the port configuration (link) */
+ u32 port_addr;
+
+ /* Current driver mailbox sequence */
+ u16 drv_mb_seq;
+ /* Current driver pulse sequence */
+ u16 drv_pulse_seq;
+
+ struct ecore_mcp_link_params link_input;
+ struct ecore_mcp_link_state link_output;
struct ecore_mcp_link_capabilities link_capabilities;
- struct ecore_mcp_function_info func_info;
+
+ struct ecore_mcp_function_info func_info;
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
@@ -49,6 +61,15 @@ struct ecore_mcp_info {
u16 mcp_hist;
};
+struct ecore_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ union drv_union_data *p_data_src;
+ union drv_union_data *p_data_dst;
+ u32 mcp_resp;
+ u32 mcp_param;
+};
+
/**
* @brief Initialize the interface with the MCP
*
@@ -140,7 +161,8 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
*/
-void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief Ack to mfw that driver finished FLR process for VFs
@@ -177,28 +199,6 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief - Sets the union data in the MCP mailbox and sends a mailbox command.
- *
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param cmd - command to be sent to the MCP
- * @param param - optional param
- * @param p_union_data - pointer to a drv_union_data
- * @param o_mcp_resp - the MCP response code (exclude sequence)
- * @param o_mcp_param - optional parameter provided by the MCP response
- *
- * @return enum _ecore_status_t -
- * ECORE_SUCCESS - operation was successful
- * ECORE_BUSY - operation failed
- */
-enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd, u32 param,
- union drv_union_data *p_union_data,
- u32 *o_mcp_resp,
- u32 *o_mcp_param);
-
-/**
* @brief - Sends an NVM write command request to the MFW with
* payload.
*
@@ -220,7 +220,8 @@ enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
- u32 i_txn_size, u32 *i_buf);
+ u32 i_txn_size,
+ u32 *i_buf);
/**
* @brief - Sends an NVM read command request to the MFW to get
@@ -244,7 +245,8 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
- u32 *o_txn_size, u32 *o_buf);
+ u32 *o_txn_size,
+ u32 *o_buf);
/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
@@ -301,4 +303,65 @@ int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 mask_parities);
+/**
+ * @brief - Sends crash mdump related info to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 epoch);
+
+/**
+ * @brief - Triggers a MFW crash dump procedure.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Clears the MFW crash dump logs.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Gets the MFW crash dump configuration and logs info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct resource_info *p_resc_info,
+ u32 *p_mcp_resp, u32 *p_mcp_param);
+
+/**
+ * @brief - Initiates PF FLR
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 7360b356..c26b4943 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -13,8 +13,8 @@
struct ecore_mcp_link_speed_params {
bool autoneg;
- u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
- u32 forced_speed; /* In Mb/s */
+ u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */
+ u32 forced_speed; /* In Mb/s */
};
struct ecore_mcp_link_pause_params {
@@ -26,19 +26,21 @@ struct ecore_mcp_link_pause_params {
struct ecore_mcp_link_params {
struct ecore_mcp_link_speed_params speed;
struct ecore_mcp_link_pause_params pause;
- u32 loopback_mode; /* in PMM_LOOPBACK values */
+ u32 loopback_mode; /* in PMM_LOOPBACK values */
};
struct ecore_mcp_link_capabilities {
u32 speed_capabilities;
+ bool default_speed_autoneg; /* Default autoneg setting */
+ u32 default_speed; /* In Mb/s */
};
struct ecore_mcp_link_state {
bool link_up;
- u32 line_speed; /* In Mb/s */
- u32 min_pf_rate; /* In Mb/s */
- u32 speed; /* In Mb/s */
+ u32 line_speed; /* In Mb/s */
+ u32 min_pf_rate; /* In Mb/s */
+ u32 speed; /* In Mb/s */
bool full_duplex;
bool an;
@@ -115,6 +117,13 @@ struct ecore_mcp_nvm_params {
};
};
+#ifndef __EXTRACT__LINUX__
+enum ecore_nvm_images {
+ ECORE_NVM_IMAGE_ISCSI_CFG,
+ ECORE_NVM_IMAGE_FCOE_CFG,
+};
+#endif
+
struct ecore_mcp_drv_version {
u32 version;
u8 name[MCP_DRV_VER_STR_SIZE - 4];
@@ -128,13 +137,39 @@ struct ecore_mcp_lan_stats {
#ifndef ECORE_PROTO_STATS
#define ECORE_PROTO_STATS
+struct ecore_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct ecore_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ecore_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_byts;
+};
enum ecore_mcp_protocol_type {
ECORE_MCP_LAN_STATS,
+ ECORE_MCP_FCOE_STATS,
+ ECORE_MCP_ISCSI_STATS,
+ ECORE_MCP_RDMA_STATS
};
union ecore_mcp_protocol_stats {
struct ecore_mcp_lan_stats lan_stats;
+ struct ecore_mcp_fcoe_stats fcoe_stats;
+ struct ecore_mcp_iscsi_stats iscsi_stats;
+ struct ecore_mcp_rdma_stats rdma_stats;
};
#endif
@@ -171,6 +206,35 @@ enum ecore_led_mode {
};
#endif
+struct ecore_temperature_sensor {
+ u8 sensor_location;
+ u8 threshold_high;
+ u8 critical;
+ u8 current_temp;
+};
+
+#define ECORE_MAX_NUM_OF_SENSORS 7
+struct ecore_temperature_info {
+ u32 num_sensors;
+ struct ecore_temperature_sensor sensors[ECORE_MAX_NUM_OF_SENSORS];
+};
+
+enum ecore_mba_img_idx {
+ ECORE_MBA_LEGACY_IDX,
+ ECORE_MBA_PCI3CLP_IDX,
+ ECORE_MBA_PCI3_IDX,
+ ECORE_MBA_FCODE_IDX,
+ ECORE_EFI_X86_IDX,
+ ECORE_EFI_IPF_IDX,
+ ECORE_EFI_EBC_IDX,
+ ECORE_EFI_X64_IDX,
+ ECORE_MAX_NUM_OF_ROMIMG
+};
+
+struct ecore_mba_vers {
+ u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG];
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -209,19 +273,20 @@ struct ecore_mcp_link_capabilities
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool b_up);
+ struct ecore_ptt *p_ptt,
+ bool b_up);
/**
* @brief Get the management firmware version value
*
- * @param p_dev - ecore dev pointer
+ * @param p_hwfn
* @param p_ptt
* @param p_mfw_ver - mfw version value
* @param p_running_bundle_id - image id in nvram; Optional.
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
+enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_mfw_ver,
u32 *p_running_bundle_id);
@@ -237,7 +302,7 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
* ECORE_BUSY - Operation failed
*/
enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
- u32 *media_type);
+ u32 *media_type);
/**
* @brief - Sends a command to the MCP mailbox.
@@ -267,6 +332,7 @@ enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#ifndef LINUX_REMOVE
/**
* @brief - return the mcp function info of the hw function
*
@@ -276,6 +342,7 @@ enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
*/
const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
+#endif
/**
* @brief - Function for reading/manipulating the nvram. Following are supported
@@ -315,6 +382,7 @@ enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_nvm_params *params);
+#ifndef LINUX_REMOVE
/**
* @brief - count number of function with a matching personality on engine.
*
@@ -326,7 +394,9 @@ enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
* the bitsmasks.
*/
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 personalities);
+ struct ecore_ptt *p_ptt,
+ u32 personalities);
+#endif
/**
* @brief Get the flash size value
@@ -505,7 +575,8 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr);
+enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev,
+ u32 addr);
/**
* @brief Check latest response
@@ -542,7 +613,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
- u8 *p_buf, u32 len);
+ u8 *p_buf, u32 len);
/**
* @brief Read from sfp
@@ -608,4 +679,127 @@ enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 gpio, u16 gpio_val);
+/**
+ * @brief Gpio get information
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param gpio - gpio number
+ * @param gpio_direction - gpio is output (0) or input (1)
+ * @param gpio_ctrl - gpio control is uninitialized (0),
+ * path 0 (1), path 1 (2) or shared (3)
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 gpio, u32 *gpio_direction,
+ u32 *gpio_ctrl);
+
+/**
+ * @brief Bist register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Bist nvm test - get number of images
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_images - number of images if operation was
+ * successful. 0 if not.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *num_images);
+
+/**
+ * @brief Bist nvm test - get image attributes by index
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_image_att - Attributes of image
+ * @param image_index - Index of image to get information for
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index);
+
+/**
+ * @brief ecore_mcp_get_temperature_info - get the status of the temperature
+ * sensors
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_temp_info - A pointer to an ecore_temperature_info structure to
+ * be filled with the temperature data
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_temperature_info *p_temp_info);
+
+/**
+ * @brief Get MBA versions - get MBA sub images versions
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_mba_vers - MBA versions array to fill
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_mba_versions(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mba_vers *p_mba_vers);
+
+/**
+ * @brief Count memory ecc events
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param num_events - number of memory ecc events
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 *num_events);
+
+/**
+ * @brief Sets whether a critical error notification from the MFW is acked, or
+ * is being ignored, thus allowing the MFW crash dump.
+ *
+ * @param p_dev
+ * @param mdump_enable
+ *
+ */
+void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable);
+
#endif
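
As an illustration of the MBA-version API declared above, the sketch below reads the per-ROM-image version words and picks out the legacy entry using the new enum ecore_mba_img_idx. It is not part of the patch; the wrapper name is invented, and the PTT handling again assumes the ecore_ptt_acquire()/ecore_ptt_release() convention.

/* Illustrative sketch only -- not part of the imported patch.
 * example_get_legacy_mba_ver() is an assumed caller name.
 */
static enum _ecore_status_t
example_get_legacy_mba_ver(struct ecore_hwfn *p_hwfn, u32 *p_legacy_ver)
{
	struct ecore_mba_vers mba_vers;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	rc = ecore_mcp_get_mba_versions(p_hwfn, p_ptt, &mba_vers);
	if (rc == ECORE_SUCCESS)
		/* One 32-bit version word per ROM image type */
		*p_legacy_ver = mba_vers.mba_vers[ECORE_MBA_LEGACY_IDX];

	ecore_ptt_release(p_hwfn, p_ptt);
	return rc;
}
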
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index 2fecbc86..e252d528 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -13,16 +13,75 @@
* PF parameters (according to personality/protocol)
*/
+#define ECORE_ROCE_PROTOCOL_INDEX (3)
+
struct ecore_eth_pf_params {
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
* to update_pf_params routine invoked before slowpath start
*/
- u16 num_cons;
+ u16 num_cons;
+};
+
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
+struct ecore_iscsi_pf_params {
+ u64 glbl_q_params_addr;
+ u64 bdq_pbl_base_addr[2];
+ u16 cq_num_entries;
+ u16 cmdq_num_entries;
+ u32 two_msl_timer;
+ u16 tx_sws_timer;
+ /* The following parameters are used during HW-init
+ * and these parameters need to be passed as arguments
+ * to update_pf_params routine invoked before slowpath start
+ */
+ u16 num_cons;
+ u16 num_tasks;
+
+ /* The following parameters are used during protocol-init */
+ u16 half_way_close_timeout;
+ u16 bdq_xoff_threshold[2];
+ u16 bdq_xon_threshold[2];
+ u16 cmdq_xoff_threshold;
+ u16 cmdq_xon_threshold;
+ u16 rq_buffer_size;
+
+ u8 num_sq_pages_in_ring;
+ u8 num_r2tq_pages_in_ring;
+ u8 num_uhq_pages_in_ring;
+ u8 num_queues;
+ u8 log_page_size;
+ u8 rqe_log_size;
+ u8 max_fin_rt;
+ u8 gl_rq_pi;
+ u8 gl_cmd_pi;
+ u8 debug_mode;
+ u8 ll2_ooo_queue_id;
+ u8 ooo_enable;
+
+ u8 is_target;
+ u8 bdq_pbl_num_entries[2];
+};
+
+struct ecore_rdma_pf_params {
+ /* Supplied to ECORE during resource allocation (may affect the ILT and
+ * the doorbell BAR).
+ */
+ u32 min_dpis; /* number of requested DPIs */
+ u32 num_mrs; /* number of requested memory regions */
+ u32 num_qps; /* number of requested Queue Pairs */
+ u32 num_srqs; /* number of requested SRQ */
+ u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
+ u8 gl_pi; /* protocol index */
+
+ /* Will allocate rate limiters to be used with QPs */
+ u8 enable_dcqcn;
};
struct ecore_pf_params {
- struct ecore_eth_pf_params eth_pf_params;
+ struct ecore_eth_pf_params eth_pf_params;
+ struct ecore_iscsi_pf_params iscsi_pf_params;
+ struct ecore_rdma_pf_params rdma_pf_params;
};
#endif
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 1f5139ea..01a29e31 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -10,437 +10,444 @@
#define __RT_DEFS_H__
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30799
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30815
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30849
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31523
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32547
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33059
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33848
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33849
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33850
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33851
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33860
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33861
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33862
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33863
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33922
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33923
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30817
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30851
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
-#define RUNTIME_ARRAY_SIZE 33924
+#define RUNTIME_ARRAY_SIZE 33927
#endif /* __RT_DEFS_H__ */
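Note on the block above: the only functional constraint on the runtime-offset table is that RUNTIME_ARRAY_SIZE stays exactly one larger than the highest *_RT_OFFSET, which is why it moves from 33924 to 33927 once XCM_REG_CON_PHY_Q3_RT_OFFSET (33926) becomes the last entry. A minimal consistency check, not part of the patch and assuming a C11 toolchain and that the header above is included as "ecore_rt_defs.h", could look like:

#include "ecore_rt_defs.h"

/* Hypothetical compile-time guard: the runtime init array must cover every
 * *_RT_OFFSET defined above, i.e. be one past the last offset.
 */
_Static_assert(RUNTIME_ARRAY_SIZE == XCM_REG_CON_PHY_Q3_RT_OFFSET + 1,
	       "RUNTIME_ARRAY_SIZE must be last runtime offset + 1");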
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index e80f5ef3..a4cb507f 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -12,9 +12,9 @@
#include "ecore_status.h"
enum spq_mode {
- ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
- ECORE_SPQ_MODE_CB, /* Client supplies a callback */
- ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
+ ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ ECORE_SPQ_MODE_CB, /* Client supplies a callback */
+ ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
};
struct ecore_hwfn;
@@ -22,11 +22,14 @@ union event_ring_data;
struct eth_slow_path_rx_cqe;
struct ecore_spq_comp_cb {
- void (*function)(struct ecore_hwfn *,
- void *, union event_ring_data *, u8 fw_return_code);
- void *cookie;
+ void (*function)(struct ecore_hwfn *,
+ void *,
+ union event_ring_data *,
+ u8 fw_return_code);
+ void *cookie;
};
+
/**
* @brief ecore_eth_cqe_completion - handles the completion of a
* ramrod on the cqe ring
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index e9ac8988..b3736a8c 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -21,6 +21,7 @@
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
+#include "ecore_sriov.h"
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
@@ -32,6 +33,9 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (!pp_ent)
+ return ECORE_INVAL;
+
/* Get an SPQ entry */
rc = ecore_spq_get_entry(p_hwfn, pp_ent);
if (rc != ECORE_SUCCESS)
@@ -95,6 +99,8 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
return TUNNEL_CLSS_INNER_MAC_VLAN;
case ECORE_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
+ case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
@@ -323,11 +329,11 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
- struct ecore_spq_entry *p_ent = OSAL_NULL;
u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
/* update initial eq producer */
@@ -351,7 +357,6 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
p_ramrod->event_ring_sb_index = sb_index;
p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
- p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* For easier debugging */
p_ramrod->dont_log_ramrods = 0;
@@ -370,6 +375,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
"Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR;
}
+ p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -391,12 +397,21 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
- p_hwfn->hw_info.personality);
+ p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
- p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
- p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;
+ if (p_hwfn->p_dev->p_iov_info) {
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
+ }
+ /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
+ * version is available.
+ */
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
@@ -416,8 +431,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -437,6 +452,49 @@ enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_rl_update_params *params)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ struct rl_update_ramrod_data *rl_update;
+ struct ecore_sp_init_data init_data;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rl_update = &p_ent->ramrod.rl_update;
+
+ rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
+ rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
+ rl_update->rl_init_flg = params->rl_init_flg;
+ rl_update->rl_start_flg = params->rl_start_flg;
+ rl_update->rl_stop_flg = params->rl_stop_flg;
+ rl_update->rl_id_first = params->rl_id_first;
+ rl_update->rl_id_last = params->rl_id_last;
+ rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
+ rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
+ rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
+ rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
+ rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
+ rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+ rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
+ rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
+ params->dcqcn_timeuot_us);
+ rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
@@ -445,8 +503,8 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_spq_comp_cb *p_comp_data)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -465,28 +523,27 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
&p_ent->ramrod.pf_update.tunnel_config);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- if ((rc == ECORE_SUCCESS) && p_tunn) {
- if (p_tunn->update_vxlan_udp_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
+ if (p_tunn->update_vxlan_udp_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_udp_port);
+ if (p_tunn->update_geneve_udp_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_udp_port);
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
- }
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+ p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
return rc;
}
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -506,8 +563,8 @@ enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index e281ab0e..66c9a69b 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -21,12 +21,12 @@ struct ecore_sp_init_data {
* e.g., in IOV scenarios. CID might defer between SPQ and
* other elements.
*/
- u32 cid;
- u16 opaque_fid;
+ u32 cid;
+ u16 opaque_fid;
/* Information regarding operation upon sending & completion */
- enum spq_mode comp_mode;
- struct ecore_spq_comp_cb *p_comp_data;
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb *p_comp_data;
};
@@ -134,4 +134,34 @@ enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
+struct ecore_rl_update_params {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg; /* If set, RL will be used for DCQCN */
+ u32 rl_bc_rate; /* Byte Counter Limit */
+ u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
+ u16 rl_r_ai; /* Active increase rate */
+ u16 rl_r_hai; /* Hyper active increase rate */
+ u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
+ u32 dcqcn_k_us; /* DCQCN Alpha update interval */
+ u32 dcqcn_timeuot_us;
+ u32 qcn_timeuot_us;
+};
+
+/**
+ * @brief ecore_sp_rl_update - Update rate limiters
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_rl_update_params *params);
+
#endif /*__ECORE_SP_COMMANDS_H__*/
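For reference, a minimal caller sketch for the rate-limiter API declared above; it is illustrative only (not part of the patch) and every field value is hypothetical:

#include "ecore_sp_commands.h"

/* Sketch: program DCQCN rate limiters for RL IDs 0..15. */
static enum _ecore_status_t example_configure_rl(struct ecore_hwfn *p_hwfn)
{
	struct ecore_rl_update_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.rl_init_flg = 1;
	params.rl_start_flg = 1;
	params.rl_id_first = 0;
	params.rl_id_last = 15;
	params.rl_dc_qcn_flg = 1;	/* use the RLs for DCQCN */
	params.rl_max_rate = 625;	/* 625 * 1.6 Mbps = 1000 Mbps */
	params.dcqcn_update_param_flg = 1;
	params.dcqcn_g = 1019;		/* alpha gain, 1/64K resolution */
	params.dcqcn_k_us = 55;		/* alpha update interval */

	return ecore_sp_rl_update(p_hwfn, &params);
}

ecore_sp_rl_update() posts the ramrod in ECORE_SPQ_MODE_EBLOCK, so a call like this returns only after firmware completion (or timeout).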
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index b263693f..0d744ddd 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -27,7 +27,11 @@
***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
-#define SPQ_BLOCK_SLEEP_LENGTH (1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MS (5)
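The four constants above bound the new two-stage wait in the blocking path: a short busy-wait first, then a sleeping poll, and only afterwards an MCP drain followed by one more sleeping poll. As a rough worked example of the resulting budget (illustration only, derived from the constants above, not part of the patch):

/* Worst-case wait implied by the constants above:
 *   quick poll : SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10 us = 100 us
 *   sleep poll : SPQ_BLOCK_SLEEP_MAX_ITER * SPQ_BLOCK_SLEEP_MS = 1000 * 5 ms = 5 s
 * plus one further sleep-poll pass after a successful MCP drain.
 */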
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
@@ -48,53 +52,76 @@ static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
OSAL_SMP_WMB(p_hwfn->p_dev);
}
-static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
- struct ecore_spq_entry *p_ent,
- u8 *p_fw_ret)
+static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret,
+ bool sleep_between_iter)
{
- int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
struct ecore_spq_comp_done *comp_done;
- enum _ecore_status_t rc;
+ u32 iter_cnt;
comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
- while (sleep_count) {
+ iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ : SPQ_BLOCK_DELAY_MAX_ITER;
+
+ while (iter_cnt--) {
OSAL_POLL_MODE_DPC(p_hwfn);
- /* validate we receive completion update */
OSAL_SMP_RMB(p_hwfn->p_dev);
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
- OSAL_MSLEEP(5);
- sleep_count--;
+
+ if (sleep_between_iter)
+ OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
+ else
+ OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
}
+ return ECORE_TIMEOUT;
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent,
+ u8 *p_fw_ret, bool skip_quick_poll)
+{
+ struct ecore_spq_comp_done *comp_done;
+ enum _ecore_status_t rc;
+
+ /* A relatively short polling period w/o sleeping, to allow the FW to
+ * complete the ramrod and thus possibly to avoid the following sleeps.
+ */
+ if (!skip_quick_poll) {
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+ }
+
+ /* Move to polling with a sleeping period between iterations */
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+ goto err;
+ }
/* Retry after drain */
- sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
- while (sleep_count) {
- /* validate we receive completion update */
- OSAL_SMP_RMB(p_hwfn->p_dev);
- if (comp_done->done == 1) {
- if (p_fw_ret)
- *p_fw_ret = comp_done->fw_return_code;
- return ECORE_SUCCESS;
- }
- OSAL_MSLEEP(5);
- sleep_count--;
- }
+ rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (rc == ECORE_SUCCESS)
+ return ECORE_SUCCESS;
+ comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
if (comp_done->done == 1) {
if (p_fw_ret)
*p_fw_ret = comp_done->fw_return_code;
return ECORE_SUCCESS;
}
-
+err:
DP_NOTICE(p_hwfn, true,
"Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
@@ -157,7 +184,7 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc < 0) {
- DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
+ DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
p_spq->cid);
return;
}
@@ -187,10 +214,8 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
p_cxt->xstorm_st_context.spq_base_hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.consolid_base_addr.lo =
- DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.consolid_base_addr.hi =
- DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+ DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+ p_hwfn->p_consq->chain.p_phys_addr);
}
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
@@ -218,19 +243,15 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- OSAL_RMB(p_hwfn->p_dev);
-
db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
- /* do not reorder */
- OSAL_BARRIER(p_hwfn->p_dev);
+ /* make sure the SPQE is updated before the doorbell */
+ OSAL_WMB(p_hwfn->p_dev);
DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rang */
- OSAL_MMIOWB(p_hwfn->p_dev);
+ OSAL_WMB(p_hwfn->p_dev);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
@@ -305,14 +326,15 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
- "op %x prot %x res0 %x echo %x "
- "fwret %x flags %x\n", p_eqe->opcode,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode, /* Event Opcode */
p_eqe->protocol_id, /* Event Protocol ID */
p_eqe->reserved0, /* Reserved */
+ /* Echo value from ramrod data on the host */
OSAL_LE16_TO_CPU(p_eqe->echo),
- p_eqe->fw_return_code, /* FW return code for SP
- * ramrods
- */
+ p_eqe->fw_return_code, /* FW return code for SP
+ * ramrods
+ */
p_eqe->flags);
if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
@@ -338,21 +360,21 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
struct ecore_eq *p_eq;
/* Allocate EQ struct */
- p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
+ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_eq'\n");
return OSAL_NULL;
}
- /* Allocate and initialize EQ chain */
+ /* Allocate and initialize EQ chain*/
if (ecore_chain_alloc(p_hwfn->p_dev,
ECORE_CHAIN_USE_TO_PRODUCE,
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element), &p_eq->chain)) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
+ DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
@@ -419,8 +441,8 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
- struct ecore_spq_entry *p_virt = OSAL_NULL;
struct ecore_spq *p_spq = p_hwfn->p_spq;
+ struct ecore_spq_entry *p_virt = OSAL_NULL;
dma_addr_t p_phys = 0;
u32 i, capacity;
@@ -436,8 +458,7 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
capacity = ecore_chain_get_capacity(&p_spq->chain);
for (i = 0; i < capacity; i++) {
- p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
- p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+ DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
@@ -475,7 +496,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
if (!p_spq) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_spq'");
+ "Failed to allocate `struct ecore_spq'\n");
return ECORE_NOMEM;
}
@@ -484,7 +505,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
/* N/A when the mode is SINGLE */
sizeof(struct slow_path_element), &p_spq->chain)) {
- DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+ DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -537,18 +558,18 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_ent = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
OSAL_SPIN_LOCK(&p_spq->lock);
if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
- p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
- sizeof(struct ecore_spq_entry));
+ p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
if (!p_ent) {
- OSAL_SPIN_UNLOCK(&p_spq->lock);
DP_NOTICE(p_hwfn, true,
- "Failed to allocate an SPQ entry"
- " for a pending ramrod\n");
- return ECORE_NOMEM;
+ "Failed to allocate an SPQ entry for a pending"
+ " ramrod\n");
+ rc = ECORE_NOMEM;
+ goto out_unlock;
}
p_ent->queue = &p_spq->unlimited_pending;
} else {
@@ -560,9 +581,9 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
*pp_ent = p_ent;
+out_unlock:
OSAL_SPIN_UNLOCK(&p_spq->lock);
-
- return ECORE_SUCCESS;
+ return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
@@ -607,32 +628,29 @@ ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
p_spq->unlimited_pending_count++;
return ECORE_SUCCESS;
- }
-
- struct ecore_spq_entry *p_en2;
- p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
- struct ecore_spq_entry,
- list);
- OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+ } else {
+ struct ecore_spq_entry *p_en2;
- /* Copy the ring element physical pointer to the new
- * entry, since we are about to override the entire ring
- * entry and don't want to lose the pointer.
- */
- p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+ p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+ struct ecore_spq_entry,
+ list);
+ OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
- /* Setting the cookie to the comp_done of the
- * new element.
- */
- if (p_ent->comp_cb.cookie == &p_ent->comp_done)
- p_ent->comp_cb.cookie = &p_en2->comp_done;
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to override the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
- *p_en2 = *p_ent;
+ *p_en2 = *p_ent;
- OSAL_FREE(p_hwfn->p_dev, p_ent);
+ /* EBLOCK responsible to free the allocated p_ent */
+ if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
- p_ent = p_en2;
+ p_ent = p_en2;
+ }
}
/* entry is to be placed in 'pending' queue */
@@ -682,16 +700,22 @@ static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
!OSAL_LIST_IS_EMPTY(head)) {
struct ecore_spq_entry *p_ent =
OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
- OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
- OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
- p_spq->comp_sent_count++;
-
- rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
- if (rc) {
- OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
- &p_spq->completion_pending);
- __ecore_spq_return_entry(p_hwfn, p_ent);
- return rc;
+ if (p_ent != OSAL_NULL) {
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011 28182)
+#endif
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+ OSAL_LIST_PUSH_TAIL(&p_ent->list,
+ &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+ &p_spq->completion_pending);
+ __ecore_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
}
}
@@ -700,7 +724,6 @@ static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_NOTIMPL;
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_ent = OSAL_NULL;
@@ -713,17 +736,16 @@ static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
if (!p_ent)
return ECORE_INVAL;
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011)
+#endif
OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
}
- rc = ecore_spq_post_list(p_hwfn,
+ return ecore_spq_post_list(p_hwfn,
&p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
- if (rc)
- return rc;
-
- return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
@@ -745,7 +767,7 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
if (p_hwfn->p_dev->recov_in_prog) {
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Recovery is in progress -> skip spq post"
- " [cmd %02x protocol %02x]",
+ " [cmd %02x protocol %02x]\n",
p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
/* Return success to let the flows to be completed successfully
* w/o any error handling.
@@ -785,7 +807,21 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
* access p_ent here to see whether it's successful or not.
* Thus, after gaining the answer perform the cleanup here.
*/
- rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
+ rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
+ p_ent->queue == &p_spq->unlimited_pending);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to
+ * return to pool.
+ */
+ OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+ /* TBD: handle error flow and remove p_ent from
+ * completion pending
+ */
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
@@ -885,10 +921,13 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
ecore_spq_return_entry(p_hwfn, found);
- }
/* Attempt to post pending requests */
OSAL_SPIN_LOCK(&p_spq->lock);
@@ -904,7 +943,7 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
/* Allocate ConsQ struct */
p_consq =
- OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
+ OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_consq'\n");
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 5c168654..717ede30 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -16,24 +16,25 @@
#include "ecore_sp_api.h"
union ramrod_data {
- struct pf_start_ramrod_data pf_start;
- struct pf_update_ramrod_data pf_update;
- struct rx_queue_start_ramrod_data rx_queue_start;
- struct rx_queue_update_ramrod_data rx_queue_update;
- struct rx_queue_stop_ramrod_data rx_queue_stop;
- struct tx_queue_start_ramrod_data tx_queue_start;
- struct tx_queue_stop_ramrod_data tx_queue_stop;
- struct vport_start_ramrod_data vport_start;
- struct vport_stop_ramrod_data vport_stop;
- struct vport_update_ramrod_data vport_update;
- struct core_rx_start_ramrod_data core_rx_queue_start;
- struct core_rx_stop_ramrod_data core_rx_queue_stop;
- struct core_tx_start_ramrod_data core_tx_queue_start;
- struct core_tx_stop_ramrod_data core_tx_queue_stop;
- struct vport_filter_update_ramrod_data vport_filter_update;
-
- struct vf_start_ramrod_data vf_start;
- struct vf_stop_ramrod_data vf_stop;
+ struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
+ struct rl_update_ramrod_data rl_update;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
@@ -45,83 +46,83 @@ enum spq_priority {
union ecore_spq_req_comp {
struct ecore_spq_comp_cb cb;
- u64 *done_addr;
+ u64 *done_addr;
};
/* SPQ_MODE_EBLOCK */
struct ecore_spq_comp_done {
u64 done;
- u8 fw_return_code;
+ u8 fw_return_code;
};
struct ecore_spq_entry {
- osal_list_entry_t list;
+ osal_list_entry_t list;
- u8 flags;
+ u8 flags;
/* HSI slow path element */
- struct slow_path_element elem;
+ struct slow_path_element elem;
- union ramrod_data ramrod;
+ union ramrod_data ramrod;
- enum spq_priority priority;
+ enum spq_priority priority;
/* pending queue for this entry */
- osal_list_t *queue;
+ osal_list_t *queue;
- enum spq_mode comp_mode;
- struct ecore_spq_comp_cb comp_cb;
- struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+ enum spq_mode comp_mode;
+ struct ecore_spq_comp_cb comp_cb;
+ struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
};
struct ecore_eq {
- struct ecore_chain chain;
- u8 eq_sb_index; /* index within the SB */
- __le16 *p_fw_cons; /* ptr to index value */
+ struct ecore_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
};
struct ecore_consq {
- struct ecore_chain chain;
+ struct ecore_chain chain;
};
struct ecore_spq {
- osal_spinlock_t lock;
+ osal_spinlock_t lock;
- osal_list_t unlimited_pending;
- osal_list_t pending;
- osal_list_t completion_pending;
- osal_list_t free_pool;
+ osal_list_t unlimited_pending;
+ osal_list_t pending;
+ osal_list_t completion_pending;
+ osal_list_t free_pool;
- struct ecore_chain chain;
+ struct ecore_chain chain;
/* allocated dma-able memory for spq entries (+ramrod data) */
- dma_addr_t p_phys;
- struct ecore_spq_entry *p_virt;
+ dma_addr_t p_phys;
+ struct ecore_spq_entry *p_virt;
/* Bitmap for handling out-of-order completions */
-#define SPQ_RING_SIZE \
+#define SPQ_RING_SIZE \
(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
-#define SPQ_COMP_BMAP_SIZE \
-(SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */))
- unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
- u8 comp_bitmap_idx;
-#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
-(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+/* BITS_PER_LONG */
+#define SPQ_COMP_BMAP_SIZE (SPQ_RING_SIZE / (sizeof(unsigned long) * 8))
+ unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
+ u8 comp_bitmap_idx;
+#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
+ (OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
-#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
-(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
+ (OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
-#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
-(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
+#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
+ (OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
/* Statistics */
- u32 unlimited_pending_count;
- u32 normal_count;
- u32 high_count;
- u32 comp_sent_count;
- u32 comp_count;
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
- u32 cid;
+ u32 cid;
};
struct ecore_port;
@@ -136,9 +137,9 @@ struct ecore_hwfn;
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
+enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent,
- u8 *fw_return_code);
+ u8 *fw_return_code);
/**
* @brief ecore_spq_allocate - Alloocates & initializes the SPQ and EQ.
@@ -147,7 +148,7 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_setup - Reset the SPQ to its start state.
@@ -175,7 +176,8 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn);
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
+ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry **pp_ent);
/**
* @brief ecore_spq_return_entry - Return an entry to spq free
@@ -184,8 +186,8 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent);
* @param p_hwfn
* @param p_ent
*/
-void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
- struct ecore_spq_entry *p_ent);
+void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_entry *p_ent);
/**
* @brief ecore_eq_allocate - Allocates & initializes an EQ struct
*
@@ -194,7 +196,8 @@ void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
*
* @return struct ecore_eq* - a newly allocated structure; NULL upon error.
*/
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
+struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn,
+ u16 num_elem);
/**
* @brief ecore_eq_setup - Reset the SPQ to its start state.
@@ -202,7 +205,8 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
* @param p_hwfn
* @param p_eq
*/
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_eq *p_eq);
/**
* @brief ecore_eq_deallocate - deallocates the given EQ struct.
@@ -210,7 +214,8 @@ void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
* @param p_hwfn
* @param p_eq
*/
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
+void ecore_eq_free(struct ecore_hwfn *p_hwfn,
+ struct ecore_eq *p_eq);
/**
* @brief ecore_eq_prod_update - update the FW with default EQ producer
@@ -218,7 +223,8 @@ void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq);
* @param p_hwfn
* @param prod
*/
-void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
+void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn,
+ u16 prod);
/**
* @brief ecore_eq_completion - Completes currently pending EQ elements
@@ -228,8 +234,8 @@ void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod);
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
- void *cookie);
+enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
+ void *cookie);
/**
* @brief ecore_spq_completion - Completes a single event
@@ -240,10 +246,10 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
- __le16 echo,
- u8 fw_return_code,
- union event_ring_data *p_data);
+enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
/**
* @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
@@ -262,7 +268,7 @@ u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
*
* @return struct ecore_eq* - a newly allocated structure; NULL upon error.
*/
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_setup - Reset the ConsQ to its start
@@ -271,7 +277,8 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
* @param p_hwfn
* @param p_eq
*/
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_consq *p_consq);
/**
* @brief ecore_consq_free - deallocates the given ConsQ struct.
@@ -279,6 +286,7 @@ void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
* @param p_hwfn
* @param p_eq
*/
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq);
+void ecore_consq_free(struct ecore_hwfn *p_hwfn,
+ struct ecore_consq *p_consq);
#endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 1b3119d2..b28d7281 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -25,8 +25,8 @@
#include "ecore_cxt.h"
#include "ecore_vf.h"
#include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
-/* TEMPORARY until we implement print_enums... */
const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
@@ -54,6 +54,178 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_MAX"
};
+/* IOV ramrods */
+static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ u8 fp_minor;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_vf->opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return ECORE_INVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is"
+ " slightly newer than PF's %02x.%02x; Configuring"
+ " PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR, fp_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+ u32 concrete_vfid,
+ u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
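These two static helpers are presumably consumed by the PF-side VF bring-up and teardown flows later in this file; a compressed sketch of the expected call order (illustrative only, not part of the patch, field use follows the code above):

/* Sketch: per-VF lifecycle from the PF side. */
static enum _ecore_status_t example_vf_lifecycle(struct ecore_hwfn *p_hwfn,
						 struct ecore_vf_info *p_vf)
{
	enum _ecore_status_t rc;

	rc = ecore_sp_vf_start(p_hwfn, p_vf);	/* COMMON_RAMROD_VF_START */
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... VF acquires resources and runs ... */

	return ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
				p_vf->opaque_fid);	/* COMMON_RAMROD_VF_STOP */
}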
+
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
+ bool b_enabled_only)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ return true;
+}
+
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+ return OSAL_NULL;
+ }
+
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 rx_qid)
+{
+ if (rx_qid >= p_vf->num_rxqs)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x];"
+ " Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return rx_qid < p_vf->num_rxqs;
+}
+
+static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 tx_qid)
+{
+ if (tx_qid >= p_vf->num_txqs)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x];"
+ " Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return tx_qid < p_vf->num_txqs;
+}
+
+static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as"
+ " one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
@@ -72,9 +244,9 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
struct ecore_dmae_params params;
struct ecore_vf_info *p_vf;
- int crc_size = sizeof(p_bulletin->crc);
p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
@@ -106,7 +278,7 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
- struct ecore_hw_sriov_info *iov = &p_dev->sriov_info;
+ struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
int pos = iov->pos;
DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
@@ -148,6 +320,7 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
"ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
" stride %d, page size 0x%x\n", 0,
+ /* @@@TBD MichalK - function id */
iov->nres, iov->cap, iov->ctrl,
iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
iov->offset, iov->stride, iov->pgsz);
@@ -200,16 +373,14 @@ static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
- u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
- union pfvf_tlvs *p_reply_virt_addr;
- union vfpf_tlvs *p_req_virt_addr;
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+ struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
struct ecore_bulletin_content *p_bulletin_virt;
- struct ecore_pf_iov *p_iov_info;
dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
u8 idx = 0;
- p_iov_info = p_hwfn->pf_iov_info;
-
OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
@@ -226,7 +397,7 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
- for (idx = 0; idx < num_vfs; idx++) {
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
u32 concrete;
@@ -240,6 +411,7 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
vf->state = VF_STOPPED;
+ vf->b_init = false;
vf->bulletin.phys = idx *
sizeof(struct ecore_bulletin_content) + bulletin_p;
@@ -247,7 +419,7 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
vf->bulletin.size = sizeof(struct ecore_bulletin_content);
vf->relative_vf_id = idx;
- vf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
vf->concrete_fid = concrete;
/* TODO - need to devise a better way of getting opaque */
@@ -255,6 +427,9 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
(vf->abs_vf_id << 8);
/* @@TBD MichalK - add base vport_id of VFs to equation */
vf->vport_id = p_iov_info->base_vport_id + idx;
+
+ vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
}
}
@@ -264,7 +439,7 @@ static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
void **p_v_addr;
u16 num_vfs = 0;
- num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+ num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
@@ -297,16 +472,15 @@ static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "PF's Requests mailbox [%p virt 0x%" PRIx64 " phys], "
- "Response mailbox [%p virt 0x%" PRIx64 " phys] Bulletins"
- " [%p virt 0x%" PRIx64 " phys]\n",
+ "PF's Requests mailbox [%p virt 0x%lx phys], "
+ "Response mailbox [%p virt 0x%lx phys] Bulletinsi"
+ " [%p virt 0x%lx phys]\n",
p_iov_info->mbx_msg_virt_addr,
- (u64)p_iov_info->mbx_msg_phys_addr,
+ (unsigned long)p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr,
- (u64)p_iov_info->mbx_reply_phys_addr,
- p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
-
- /* @@@TBD MichalK - statistics / RSS */
+ (unsigned long)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins,
+ (unsigned long)p_iov_info->bulletins_phys);
return ECORE_SUCCESS;
}
@@ -332,38 +506,33 @@ static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
p_iov_info->p_bulletins,
p_iov_info->bulletins_phys,
p_iov_info->bulletins_size);
-
- /* @@@TBD MichalK - statistics / RSS */
}
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_pf_iov *p_sriov;
if (!IS_PF_SRIOV(p_hwfn)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"No SR-IOV - no need for IOV db\n");
- return rc;
+ return ECORE_SUCCESS;
}
p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
if (!p_sriov) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sriov'");
+ "Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
p_hwfn->pf_iov_info = p_sriov;
- rc = ecore_iov_allocate_vfdb(p_hwfn);
-
- return rc;
+ return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- if (!IS_PF_SRIOV(p_hwfn) || !p_hwfn->pf_iov_info)
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
ecore_iov_setup_vfdb(p_hwfn);
@@ -372,39 +541,59 @@ void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
- if (p_hwfn->pf_iov_info) {
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
ecore_iov_free_vfdb(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
}
}
-enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
+{
+ OSAL_FREE(p_dev, p_dev->p_iov_info);
+ p_dev->p_iov_info = OSAL_NULL;
+}
+
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ int pos;
enum _ecore_status_t rc;
- /* @@@ TBD get this information from shmem / pci cfg */
if (IS_VF(p_hwfn->p_dev))
return ECORE_SUCCESS;
- /* First hwfn should learn the PCI configuration */
- if (IS_LEAD_HWFN(p_hwfn)) {
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- int *pos = &p_hwfn->p_dev->sriov_info.pos;
+ /* Learn the PCI configuration */
+ pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+ return ECORE_SUCCESS;
+ }
- *pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
- PCI_EXT_CAP_ID_SRIOV);
- if (!*pos) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "No PCIe IOV support\n");
- return ECORE_SUCCESS;
- }
+ /* Allocate a new struct for IOV information */
+ /* TODO - can change to VALLOC when its available */
+ p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ sizeof(*p_dev->p_iov_info));
+ if (!p_dev->p_iov_info) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't support IOV due to lack of memory\n");
+ return ECORE_NOMEM;
+ }
+ p_dev->p_iov_info->pos = pos;
- rc = ecore_iov_pci_cfg_info(p_dev);
- if (rc)
- return rc;
- } else if (!p_hwfn->p_dev->sriov_info.pos) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+ rc = ecore_iov_pci_cfg_info(p_dev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!p_dev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ OSAL_FREE(p_dev, p_dev->p_iov_info);
+ p_dev->p_iov_info = OSAL_NULL;
return ECORE_SUCCESS;
}
@@ -412,61 +601,66 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
* VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
* after the first engine's VFs.
*/
- p_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset +
- p_hwfn->abs_pf_id - 16;
+ p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
if (ECORE_PATH_ID(p_hwfn))
- p_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB;
+ p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "First VF in hwfn 0x%08x\n", p_hwfn->hw_info.first_vf_in_pf);
+ "First VF in hwfn 0x%08x\n",
+ p_dev->p_iov_info->first_vf_in_pf);
return ECORE_SUCCESS;
}
-struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
- u16 relative_vf_id,
- bool b_enabled_only)
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
- struct ecore_vf_info *vf = OSAL_NULL;
-
- if (!p_hwfn->pf_iov_info) {
- DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
- return OSAL_NULL;
- }
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
- if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
- vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
- else
- DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
- relative_vf_id);
+ /* Check VF validity */
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true))
+ return false;
- return vf;
+ return true;
}
-void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
u16 rel_vf_id, u8 to_disable)
{
struct ecore_vf_info *vf;
+ int i;
- vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
- if (!vf)
- return;
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- vf->to_disable = to_disable;
+ vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
}
-void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+ u8 to_disable)
{
u16 i;
- for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
- ecore_iov_set_vf_to_disable(p_hwfn, i, to_disable);
+ if (!IS_ECORE_SRIOV(p_dev))
+ return;
+
+ for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
+ ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}
#ifndef LINUX_REMOVE
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
- u16 vf_id, void *ctx)
+ u16 vf_id,
+ void *ctx)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
@@ -483,29 +677,9 @@ enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
}
#endif
-/**
- * VF enable primitives
- *
- * when pretend is required the caller is reponsible
- * for calling pretend prioir to calling these routines
- */
-
-/* clears vf error in all semi blocks
- * Assumption: called under VF pretend...
- */
-static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- ecore_wr(p_hwfn, p_ptt, TSEM_REG_VF_ERROR, 1);
- ecore_wr(p_hwfn, p_ptt, USEM_REG_VF_ERROR, 1);
- ecore_wr(p_hwfn, p_ptt, MSEM_REG_VF_ERROR, 1);
- ecore_wr(p_hwfn, p_ptt, XSEM_REG_VF_ERROR, 1);
- ecore_wr(p_hwfn, p_ptt, YSEM_REG_VF_ERROR, 1);
- ecore_wr(p_hwfn, p_ptt, PSEM_REG_VF_ERROR, 1);
-}
-
-static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 abs_vfid)
+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vfid)
{
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
@@ -517,30 +691,20 @@ static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf)
{
int i;
- u16 igu_sb_id;
/* Set VF masks and configuration - pretend */
ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "value in VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vf_id,
- ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
-
/* unpretend */
ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
- /* iterate ove all queues, clear sb consumer */
- for (i = 0; i < vf->num_sbs; i++) {
- igu_sb_id = vf->igu_sbs[i];
- /* Set then clear... */
- ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
- vf->opaque_fid);
- ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
- vf->opaque_fid);
- }
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
}
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
@@ -581,9 +745,11 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
ECORE_VF_ABS_ID(p_hwfn, vf));
+ ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
vf->abs_vf_id, vf->num_sbs);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
@@ -597,18 +763,6 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
/* unpretend */
ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
- if (vf->state != VF_STOPPED) {
- DP_NOTICE(p_hwfn, true, "VF[%02x] is already started\n",
- vf->abs_vf_id);
- return ECORE_INVAL;
- }
-
- /* Start VF */
- rc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
- if (rc != ECORE_SUCCESS)
- DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
- vf->abs_vf_id);
-
vf->state = VF_FREE;
return rc;
@@ -631,8 +785,7 @@ static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf, u8 enable)
{
- u32 reg_addr;
- u32 val;
+ u32 reg_addr, val;
u16 qzone_id = 0;
int qid;
@@ -650,13 +803,13 @@ static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- /* Reset vf in IGU interrupts are still disabled */
+ /* Reset vf in IGU - interrupts are still disabled */
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1 /* enable */);
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
/* Permission Table */
- ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true /* enable */);
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
@@ -664,11 +817,11 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 num_rx_queues)
{
- int igu_id = 0;
- int qid = 0;
+ struct ecore_igu_block *igu_blocks;
+ int qid = 0, igu_id = 0;
u32 val = 0;
- struct ecore_igu_block *igu_blocks =
- p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
@@ -752,24 +905,24 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id, u16 num_rx_queues)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 num_of_vf_available_chains = 0;
struct ecore_vf_info *vf = OSAL_NULL;
- u8 num_of_vf_available_chains = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cids;
u8 i;
- if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
- DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
- rel_vf_id);
- return ECORE_INVAL;
- }
-
vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
return ECORE_UNKNOWN_ERROR;
}
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
+ rel_vf_id);
+ return ECORE_INVAL;
+ }
+
/* Limit number of queues according to number of CIDs */
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -817,22 +970,62 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (rc == ECORE_SUCCESS) {
- struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
- u16 vf_id = vf->relative_vf_id;
+ vf->b_init = true;
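+ /* Mark the VF as active in the per-PF bitmap (64 VFs per word) */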
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+ (1ULL << (vf->relative_vf_id % 64));
- p_iov->num_vfs++;
- p_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64));
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs++;
}
return rc;
}
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
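+ /* Publish the requested link settings, current state and capabilities */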
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id)
{
+ struct ecore_mcp_link_capabilities caps;
+ struct ecore_mcp_link_params params;
+ struct ecore_mcp_link_state link;
struct ecore_vf_info *vf = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_SUCCESS;
vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
@@ -840,38 +1033,45 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
return ECORE_UNKNOWN_ERROR;
}
- if (vf->state != VF_STOPPED) {
- /* Stopping the VF */
- rc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
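+ /* Wipe the bulletin and cached VF info so a re-created VF starts clean */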
+ if (vf->bulletin.p_virt)
+ OSAL_MEMSET(vf->bulletin.p_virt, 0,
+ sizeof(*vf->bulletin.p_virt));
- if (rc != ECORE_SUCCESS) {
- DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
- rc);
- return rc;
- }
+ OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
- vf->state = VF_STOPPED;
- }
+ /* Get the link configuration back in the bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
+ OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
+ OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(caps));
+ ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ /* Forget the VF's acquisition message */
+ OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
/* disabling interrupts and resetting permission table was done during
* vf-close, however, we could get here without going through vf_close
*/
/* Disable Interrupts for VF */
- ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
/* Reset Permission table */
- ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
vf->num_rxqs = 0;
vf->num_txqs = 0;
ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
- if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
- struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
- u16 vf_id = vf->relative_vf_id;
+ if (vf->b_init) {
+ vf->b_init = false;
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
+ ~(1ULL << (vf->relative_vf_id % 64));
- p_iov->num_vfs--;
- p_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64));
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs--;
}
return ECORE_SUCCESS;
@@ -885,10 +1085,6 @@ static bool ecore_iov_tlv_supported(u16 tlvtype)
static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf, u16 tlv)
{
- /* we don't lock the channel for unsupported tlvs */
- if (!ecore_iov_tlv_supported(tlv))
- return;
-
/* lock the channel */
/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
@@ -896,35 +1092,35 @@ static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
/* vf->op_current = tlv; @@@TBD MichalK */
/* log the lock */
- DP_VERBOSE(p_hwfn,
- ECORE_MSG_IOV,
- "VF[%d]: vf pf channel locked by %s\n",
- vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);
+ if (ecore_iov_tlv_supported(tlv))
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %s\n",
+ vf->abs_vf_id,
+ ecore_channel_tlvs_string[tlv]);
+ else
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel locked by %04x\n",
+ vf->abs_vf_id, tlv);
}
static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 expected_tlv)
{
- /* we don't unlock the channel for unsupported tlvs */
- if (!ecore_iov_tlv_supported(expected_tlv))
- return;
-
- /* WARN(expected_tlv != vf->op_current,
- * "lock mismatch: expected %s found %s",
- * channel_tlvs_string[expected_tlv],
- * channel_tlvs_string[vf->op_current]);
- * @@@TBD MichalK
- */
-
- /* lock the channel */
- /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
-
/* log the unlock */
- DP_VERBOSE(p_hwfn,
- ECORE_MSG_IOV,
- "VF[%d]: vf pf channel unlocked by %s\n",
- vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);
+ if (ecore_iov_tlv_supported(expected_tlv))
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %s\n",
+ vf->abs_vf_id,
+ ecore_channel_tlvs_string[expected_tlv]);
+ else
+ DP_VERBOSE(p_hwfn,
+ ECORE_MSG_IOV,
+ "VF[%d]: vf pf channel unlocked by %04x\n",
+ vf->abs_vf_id, expected_tlv);
/* record the locking op */
/* vf->op_current = CHANNEL_TLV_NONE; */
@@ -996,15 +1192,15 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
mbx->reply_virt->default_resp.hdr.status = status;
+ ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
#ifdef CONFIG_ECORE_SW_CHANNEL
mbx->sw_mbx.response_size =
length + sizeof(struct channel_list_end_tlv);
-#endif
- ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
-
- if (!p_hwfn->p_dev->sriov_info.b_hw_channel)
+ if (!p_hwfn->p_dev->b_hw_channel)
return;
+#endif
eng_vf_id = p_vf->abs_vf_id;
@@ -1062,7 +1258,7 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
u16 size, total_len, i;
OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
- p_mbx->offset = (u8 *)(p_mbx->reply_virt);
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
@@ -1102,23 +1298,37 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
{
struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
- mbx->offset = (u8 *)(mbx->reply_virt);
+ mbx->offset = (u8 *)mbx->reply_virt;
ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
+}
+
+struct ecore_public_vf_info
+*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct ecore_vf_info *vf = OSAL_NULL;
+
+ vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return OSAL_NULL;
+
+ return &vf->p_vf_info;
}
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
+ u32 i;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
- p_vf->num_mac_filters = 0;
- p_vf->num_vlan_filters = 0;
- p_vf->num_mc_filters = 0;
p_vf->configured_features = 0;
/* If VF previously requested less resources, go back to default */
@@ -1127,39 +1337,169 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
+ p_vf->vf_queues[i].rxq_active = 0;
+
OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
-static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_vf_info *vf)
+static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ int i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ /* TODO - what's this sb_qid field? Is it deprecated?
+ * or is there an ecore_client that looks at this?
+ */
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern vfs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
+ " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
+ " vlan [%02x/%02x] mc [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs, p_resp->num_rxqs,
+ p_req->num_txqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
+static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
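+ /* VF statistics reside in per-storm zones inside its BAR0 window */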
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ OFFSETOF(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ OFFSETOF(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
{
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
- struct pf_vf_resc *resc = &resp->resc;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
- u16 length;
- u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ struct pf_vf_resc *resc = &resp->resc;
+ enum _ecore_status_t rc;
+
+ OSAL_MEMSET(resp, 0, sizeof(*resp));
+
+ /* Write the PF version so that VF would know which version
+ * is supported - might be later overridden. This guarantees that
+ * VF could recognize legacy PF based on lack of versions in reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
/* Validate FW compatibility */
- if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
- req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
- req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
- req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
+
+ /* This legacy support would need to be removed once
+ * the major has changed.
+ */
+ OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is"
+ " incompatible with loaded FW's fastpath"
+ " HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
- "VF[%d] is running an incompatible driver [VF needs"
- " FW %02x:%02x:%02x:%02x but Hypervisor is"
- " using %02x:%02x:%02x:%02x]\n",
- vf->abs_vf_id, req->vfdev_info.fw_major,
- req->vfdev_info.fw_minor, req->vfdev_info.fw_revision,
- req->vfdev_info.fw_engineering, FW_MAJOR_VERSION,
- FW_MINOR_VERSION, FW_REVISION_VERSION,
- FW_ENGINEERING_VERSION);
- vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ "VF[%d] is running an old driver that doesn't support"
+ " 100g\n",
+ vf->abs_vf_id);
goto out;
}
+
#ifndef __EXTRACT__LINUX__
if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1167,13 +1507,10 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
}
#endif
- OSAL_MEMSET(resp, 0, sizeof(*resp));
+ /* Store the acquire message */
+ OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
- /* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... */
vf->opaque_fid = req->vfdev_info.opaque_fid;
- vf->num_mac_filters = 1;
- vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
- vf->num_mc_filters = ECORE_MAX_MC_ADDRS;
vf->vf_bulletin = req->bulletin_addr;
vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
@@ -1183,28 +1520,13 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
pfdev_info->indices_per_sb = PIS_PER_SB;
- pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED;
-
- pfdev_info->stats_info.mstats.address =
- PXP_VF_BAR0_START_MSDM_ZONE_B +
- OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.mstats.len =
- sizeof(struct eth_mstorm_per_queue_stat);
- pfdev_info->stats_info.ustats.address =
- PXP_VF_BAR0_START_USDM_ZONE_B +
- OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.ustats.len =
- sizeof(struct eth_ustorm_per_queue_stat);
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->p_dev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- pfdev_info->stats_info.pstats.address =
- PXP_VF_BAR0_START_PSDM_ZONE_B +
- OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
- pfdev_info->stats_info.pstats.len =
- sizeof(struct eth_pstorm_per_queue_stat);
-
- pfdev_info->stats_info.tstats.address = 0;
- pfdev_info->stats_info.tstats.len = 0;
+ ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
ETH_ALEN);
@@ -1213,35 +1535,36 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
pfdev_info->fw_minor = FW_MINOR_VERSION;
pfdev_info->fw_rev = FW_REVISION_VERSION;
pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+ /* Incorrect when legacy, but doesn't matter as legacy isn't reading
+ * this field.
+ */
+ pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
- ecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver,
+ ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
OSAL_NULL);
pfdev_info->dev_type = p_hwfn->p_dev->type;
pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
- /* Fill in resc : @@@TBD MichalK Hard Coded for now... */
- resc->num_rxqs = vf->num_rxqs;
- resc->num_txqs = vf->num_txqs;
- resc->num_sbs = vf->num_sbs;
- for (i = 0; i < resc->num_sbs; i++) {
- resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
- resc->hw_sbs[i].sb_qid = 0;
- }
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
- for (i = 0; i < resc->num_rxqs; i++) {
- ecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
- (u16 *)&resc->hw_qid[i]);
- resc->cid[i] = vf->vf_queues[i].fw_cid;
+ /* Start the VF in FW */
+ rc = ecore_sp_vf_start(p_hwfn, vf);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
+ vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
}
- resc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,
- req->resc_request.num_mac_filters);
- resc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,
- req->resc_request.num_vlan_filters);
- resc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,
- req->resc_request.num_mc_filters);
-
/* Fill agreed size of bulletin board in response, and post
* an initial image to the bulletin board.
*/
@@ -1250,25 +1573,22 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
- " db_size=%d, idx_per_sb=%d, pf_cap=0x%" PRIx64 "\n"
+ " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
" n_vlans-%d, n_mcs-%d\n",
vf->abs_vf_id, resp->pfdev_info.chip_num,
resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
- resp->pfdev_info.capabilities, resc->num_rxqs,
+ (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
resc->num_vlan_filters, resc->num_mc_filters);
vf->state = VF_ACQUIRED;
- /* Prepare Response */
- length = sizeof(struct pfvf_acquire_resp_tlv);
-
out:
+ /* Prepare Response */
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
- length, vfpf_status);
-
- /* @@@TBD Bulletin */
+ sizeof(struct pfvf_acquire_resp_tlv),
+ vfpf_status);
}
static enum _ecore_status_t
@@ -1310,8 +1630,8 @@ static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_filter_ucast filter;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
OSAL_MEMSET(&filter, 0, sizeof(filter));
@@ -1322,24 +1642,25 @@ ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
/* Reconfigure vlans */
for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
- if (p_vf->shadow_config.vlans[i].used) {
- filter.type = ECORE_FILTER_VLAN;
- filter.vlan = p_vf->shadow_config.vlans[i].vid;
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Reconfig VLAN [0x%04x] for VF [%04x]\n",
- filter.vlan, p_vf->relative_vf_id);
- rc = ecore_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- ECORE_SPQ_MODE_CB,
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = ECORE_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn,
+ p_vf->opaque_fid,
+ &filter,
+ ECORE_SPQ_MODE_CB,
OSAL_NULL);
- if (rc) {
- DP_NOTICE(p_hwfn, true,
- "Failed to configure VLAN [%04x]"
- " to VF [%04x]\n",
- filter.vlan, p_vf->relative_vf_id);
- break;
- }
+ if (rc) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to configure VLAN [%04x]"
+ " to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
}
}
@@ -1457,7 +1778,7 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
if (rc) {
DP_NOTICE(p_hwfn, true,
"Failed to send Rx update"
- " queue[0x%04x]\n",
+ " for queue[0x%04x]\n",
qid);
return rc;
}
@@ -1482,14 +1803,14 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport;
struct ecore_sp_vport_start_params params = { 0 };
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
u8 status = PFVF_STATUS_SUCCESS;
struct ecore_vf_info *vf_info;
- enum _ecore_status_t rc;
u64 *p_bitmap;
int sb_id;
+ enum _ecore_status_t rc;
vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) {
@@ -1500,6 +1821,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
}
vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
@@ -1513,7 +1835,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
ecore_int_cau_conf_sb(p_hwfn, p_ptt,
start->sb_addr[sb_id],
vf->igu_sbs[sb_id],
- vf->abs_vf_id, 1 /* VF Valid */);
+ vf->abs_vf_id, 1);
}
ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
@@ -1539,7 +1861,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, false,
- "FPGA: Don't confi VF for Tx-switching [no pVFC]\n");
+ "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
params.tx_switching = false;
}
#endif
@@ -1551,6 +1873,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
+ params.check_mac = true;
rc = ecore_sp_eth_vport_start(p_hwfn, &params);
if (rc != ECORE_SUCCESS) {
@@ -1596,48 +1919,152 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
sizeof(struct pfvf_def_resp_tlv), status);
}
+static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf,
+ u8 status, bool b_legacy)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ length);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
+ req = &mbx->req_virt->start_rxq;
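+ /* Non-legacy VFs read the Rx producer from the MSDM zone in BAR0 */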
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ OFFSETOF(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
+ }
+
+ ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_start_common_params p_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;
- u16 length = sizeof(struct pfvf_def_resp_tlv);
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct vfpf_start_rxq_tlv *req;
+ bool b_legacy_vf = false;
enum _ecore_status_t rc;
+ req = &mbx->req_virt->start_rxq;
+ OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+ p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
+ p_params.vf_qid = req->rx_qid;
+ p_params.vport_id = vf->vport_id;
+ p_params.stats_id = vf->abs_vf_id + 0x10;
+ p_params.sb = req->hw_sb;
+ p_params.sb_idx = req->sb_index;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ /* Legacy VFs have their Producers in a different location, which they
+ * calculate on their own and clean the producer prior to this.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy_vf = true;
+ else
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ 0);
+
rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
- vf->vf_queues[req->rx_qid].fw_rx_qid,
- vf->vport_id,
- vf->abs_vf_id + 0x10,
- req->hw_sb,
- req->sb_index,
+ &p_params,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr,
- req->cqe_pbl_size);
+ req->cqe_pbl_size,
+ b_legacy_vf);
if (rc) {
status = PFVF_STATUS_FAILURE;
} else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
- ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,
- length, status);
+out:
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
+ status, b_legacy_vf);
+}
+
+static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ u8 status)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ length);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
+ u16 qid = mbx->req_virt->start_txq.tx_qid;
+
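+ /* Return the doorbell offset for this Tx queue's CID */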
+ p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
+ }
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_start_common_params p_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;
- u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_NO_RESOURCE;
union ecore_qm_pq_params pq_params;
- u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_txq_tlv *req;
enum _ecore_status_t rc;
/* Prepare the parameters which would choose the right PQ */
@@ -1645,24 +2072,36 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
pq_params.eth.is_vf = 1;
pq_params.eth.vf_id = vf->relative_vf_id;
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_tx_qid,
- vf->vf_queues[req->tx_qid].fw_cid,
- vf->vport_id,
- vf->abs_vf_id + 0x10,
- req->hw_sb,
- req->sb_index,
- req->pbl_addr,
- req->pbl_size, &pq_params);
+ req = &mbx->req_virt->start_txq;
+ OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+ p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
+ p_params.vport_id = vf->vport_id;
+ p_params.stats_id = vf->abs_vf_id + 0x10;
+ p_params.sb = req->hw_sb;
+ p_params.sb_idx = req->sb_index;
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ rc = ecore_sp_eth_txq_start_ramrod(
+ p_hwfn,
+ vf->opaque_fid,
+ vf->vf_queues[req->tx_qid].fw_cid,
+ &p_params,
+ req->pbl_addr,
+ req->pbl_size,
+ &pq_params);
if (rc)
status = PFVF_STATUS_FAILURE;
- else
+ else {
+ status = PFVF_STATUS_SUCCESS;
vf->vf_queues[req->tx_qid].txq_active = true;
+ }
- ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
- length, status);
+out:
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
}
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -1722,16 +2161,17 @@ static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_stop_rxqs_tlv *req = &mbx->req_virt->stop_rxqs;
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_rxqs_tlv *req;
enum _ecore_status_t rc;
/* We give the option of starting from qid != 0, in this case we
* need to make sure that qid + num_qs doesn't exceed the actual
* amount of queues that exist.
*/
+ req = &mbx->req_virt->stop_rxqs;
rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
req->num_rxqs, req->cqe_completion);
if (rc)
@@ -1745,16 +2185,17 @@ static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_stop_txqs_tlv *req = &mbx->req_virt->stop_txqs;
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_txqs_tlv *req;
enum _ecore_status_t rc;
/* We give the option of starting from qid != 0, in this case we
* need to make sure that qid + num_qs doesn't exceed the actual
* amount of queues that exist.
*/
+ req = &mbx->req_virt->stop_txqs;
rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
if (rc)
status = PFVF_STATUS_FAILURE;
@@ -1767,16 +2208,17 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq;
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
u8 status = PFVF_STATUS_SUCCESS;
u8 complete_event_flg;
u8 complete_cqe_flg;
- enum _ecore_status_t rc;
u16 qid;
+ enum _ecore_status_t rc;
u8 i;
+ req = &mbx->req_virt->update_rxq;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
@@ -1851,13 +2293,14 @@ ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
- if (p_act_tlv) {
- p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
- p_data->vport_active_rx_flg = p_act_tlv->active_rx;
- p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
- p_data->vport_active_tx_flg = p_act_tlv->active_tx;
- *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
- }
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
}
static void
@@ -1895,20 +2338,21 @@ ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_tx_switch_tlv)
+ return;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, false,
- "FPGA: Ignore tx-switching configuration originating from VFs\n");
+ "FPGA: Ignore tx-switching configuration originating"
+ " from VFs\n");
return;
}
#endif
- if (p_tx_switch_tlv) {
- p_data->update_tx_switching_flg = 1;
- p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
- *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
- }
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
}
static void
@@ -1922,14 +2366,14 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
- if (p_mcast_tlv) {
- p_data->update_approx_mcast_flg = 1;
- OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
- *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
- }
+ p_data->update_approx_mcast_flg = 1;
+ OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) *
+ ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
static void
@@ -1937,23 +2381,20 @@ ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_data,
struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
{
+ struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
- if (p_accept_tlv) {
- p_data->accept_flags.update_rx_mode_config =
- p_accept_tlv->update_rx_mode;
- p_data->accept_flags.rx_accept_filter =
- p_accept_tlv->rx_accept_filter;
- p_data->accept_flags.update_tx_mode_config =
- p_accept_tlv->update_tx_mode;
- p_data->accept_flags.tx_accept_filter =
- p_accept_tlv->tx_accept_filter;
- *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
- }
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
}
static void
@@ -1967,13 +2408,13 @@ ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_any_vlan)
+ return;
- if (p_accept_any_vlan) {
- p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
- p_data->update_accept_any_vlan_flg =
- p_accept_any_vlan->update_accept_any_vlan_flg;
- *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
- }
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
}
static void
@@ -1985,68 +2426,65 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 table_size;
u16 i, q_idx, max_q_idx;
+ u16 table_size;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
- if (p_rss_tlv) {
- OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
-
- p_rss->update_rss_config =
- !!(p_rss_tlv->update_rss_flags &
- VFPF_UPDATE_RSS_CONFIG_FLAG);
- p_rss->update_rss_capabilities =
- !!(p_rss_tlv->update_rss_flags &
- VFPF_UPDATE_RSS_CAPS_FLAG);
- p_rss->update_rss_ind_table =
- !!(p_rss_tlv->update_rss_flags &
- VFPF_UPDATE_RSS_IND_TABLE_FLAG);
- p_rss->update_rss_key =
- !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);
-
- p_rss->rss_enable = p_rss_tlv->rss_enable;
- p_rss->rss_eng_id = vf->relative_vf_id + 1;
- p_rss->rss_caps = p_rss_tlv->rss_caps;
- p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
- OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
- sizeof(p_rss->rss_key));
-
- table_size = OSAL_MIN_T(u16,
- OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
- (1 << p_rss_tlv->rss_table_size_log));
-
- max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
- for (i = 0; i < table_size; i++) {
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx) {
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is out of range\n",
- i, q_idx);
- /* TBD: fail the request mark VF as malicious */
- p_rss->rss_ind_table[i] =
- vf->vf_queues[0].fw_rx_qid;
- } else if (!vf->vf_queues[q_idx].rxq_active) {
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- /* TBD: fail the request mark VF as malicious */
- p_rss->rss_ind_table[i] =
- vf->vf_queues[0].fw_rx_qid;
- } else {
- p_rss->rss_ind_table[i] =
- vf->vf_queues[q_idx].fw_rx_qid;
- }
- }
-
- p_data->rss_params = p_rss;
- *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
- } else {
+ if (!p_rss_tlv) {
p_data->rss_params = OSAL_NULL;
+ return;
}
+
+ OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
+
+ p_rss->update_rss_config =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key =
+ !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+ sizeof(p_rss->rss_ind_table));
+ OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
+ sizeof(p_rss->rss_key));
+
+ table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
+
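+ /* Sanitize the table - default to queue 0 for invalid or inactive rxqs */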
+ for (i = 0; i < table_size; i++) {
+ u16 index = vf->vf_queues[0].fw_rx_qid;
+
+ q_idx = p_rss->rss_ind_table[i];
+ if (q_idx >= max_q_idx)
+ DP_NOTICE(p_hwfn, true,
+ "rss_ind_table[%d] = %d,"
+ " rxq is out of range\n",
+ i, q_idx);
+ else if (!vf->vf_queues[q_idx].rxq_active)
+ DP_NOTICE(p_hwfn, true,
+ "rss_ind_table[%d] = %d, rxq is not active\n",
+ i, q_idx);
+ else
+ index = vf->vf_queues[q_idx].fw_rx_qid;
+ p_rss->rss_ind_table[i] = index;
+ }
+
+ p_data->rss_params = p_rss;
+ *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
@@ -2105,11 +2543,21 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct ecore_sge_tpa_params sge_tpa_params;
+ u16 tlvs_mask = 0, tlvs_accepted = 0;
struct ecore_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
- enum _ecore_status_t rc;
- u16 tlvs_mask = 0, tlvs_accepted;
u16 length;
+ enum _ecore_status_t rc;
+
+ /* Validate PF can send such a request */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d],"
+ " failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
OSAL_MEMSET(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
@@ -2137,7 +2585,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
*/
tlvs_accepted = tlvs_mask;
-#ifndef __EXTRACT__LINUX__
+#ifndef LINUX_REMOVE
if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted) !=
ECORE_SUCCESS) {
@@ -2150,7 +2598,8 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
if (!tlvs_accepted) {
if (tlvs_mask)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Upper-layer prevents said VF configuration\n");
+ "Upper-layer prevents said VF"
+ " configuration\n");
else
DP_NOTICE(p_hwfn, true,
"No feature tlvs found for vport update\n");
@@ -2171,16 +2620,12 @@ out:
}
static enum _ecore_status_t
-ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- struct ecore_filter_ucast *p_params)
+ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
{
int i;
- /* TODO - do we need a MAC shadow registery? */
- if (p_params->type == ECORE_FILTER_MAC)
- return ECORE_SUCCESS;
-
/* First remove entries and then add new ones */
if (p_params->opcode == ECORE_FILTER_REMOVE) {
for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
@@ -2192,7 +2637,8 @@ ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
}
if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF [%d] - Tries to remove a non-existing vlan\n",
+ "VF [%d] - Tries to remove a non-existing"
+ " vlan\n",
p_vf->relative_vf_id);
return ECORE_INVAL;
}
@@ -2210,16 +2656,19 @@ ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
if (p_params->opcode == ECORE_FILTER_ADD ||
p_params->opcode == ECORE_FILTER_REPLACE) {
- for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
- if (!p_vf->shadow_config.vlans[i].used) {
- p_vf->shadow_config.vlans[i].used = true;
- p_vf->shadow_config.vlans[i].vid =
- p_params->vlan;
- break;
- }
+ for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF [%d] - Tries to configure more than %d vlan filters\n",
+ "VF [%d] - Tries to configure more than %d"
+ " vlan filters\n",
p_vf->relative_vf_id,
ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
return ECORE_INVAL;
@@ -2229,19 +2678,104 @@ ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static enum _ecore_status_t
+ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ char empty_mac[ETH_ALEN];
+ int i;
+
+ OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
+
+ /* If we're in forced-mode, we don't allow any change */
+ /* TODO - this would change if we were ever to implement logic for
+ * removing a forced MAC altogether [in which case, like for vlans,
+ * we should be able to re-trace previous configuration].
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return ECORE_SUCCESS;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == ECORE_FILTER_REMOVE) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN)) {
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "MAC isn't configured\n");
+ return ECORE_INVAL;
+ }
+ } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+ p_params->opcode == ECORE_FILTER_FLUSH) {
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
+ OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != ECORE_FILTER_ADD &&
+ p_params->opcode != ECORE_FILTER_REPLACE)
+ return ECORE_SUCCESS;
+
+ for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+ empty_mac, ETH_ALEN)) {
+ OSAL_MEMCPY(p_vf->shadow_config.macs[i],
+ p_params->mac, ETH_ALEN);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Added MAC at entry %d in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No available place for MAC\n");
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_filter_ucast *p_params)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_params->type == ECORE_FILTER_MAC) {
+ rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ if (p_params->type == ECORE_FILTER_VLAN)
+ rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;
struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
- struct ecore_filter_ucast params;
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
u8 status = PFVF_STATUS_SUCCESS;
+ struct ecore_filter_ucast params;
enum _ecore_status_t rc;
/* Prepare the unicast filter params */
OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
params.opcode = (enum ecore_filter_opcode)req->opcode;
params.type = (enum ecore_filter_ucast_type)req->type;
@@ -2254,7 +2788,8 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
params.vlan = req->vlan;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
+ " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
vf->abs_vf_id, params.opcode, params.type,
params.is_rx_filter ? "RX" : "",
params.is_tx_filter ? "TX" : "",
@@ -2264,7 +2799,8 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
if (!vf->vport_instance) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ "No VPORT instance available for VF[%d],"
+ " failing ucast MAC configuration\n",
vf->abs_vf_id);
status = PFVF_STATUS_FAILURE;
goto out;
@@ -2343,10 +2879,10 @@ static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
u8 status = PFVF_STATUS_SUCCESS;
/* Disable Interrupts for VF */
- ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+ ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
/* Reset Permission table */
- ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+ ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
length, status);
@@ -2357,11 +2893,27 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
ecore_iov_vf_cleanup(p_hwfn, p_vf);
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
- length, PFVF_STATUS_SUCCESS);
+ length, status);
}
static enum _ecore_status_t
@@ -2439,61 +2991,6 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum _ecore_status_t
-ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
-{
- u16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS];
- u16 prod[NUM_OF_TCS];
- int i, cnt;
-
- /* Read initial consumers & producers */
- for (i = 0; i < NUM_OF_TCS; i++) {
- tc_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
- PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
- tc_lb_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
- PRS_REG_MSG_CT_LB_0 + i * 0x4);
- prod[i] = (u16)ecore_rd(p_hwfn, p_ptt,
- BRB_REG_PER_TC_COUNTERS +
- p_hwfn->port_id * 0x20 + i * 0x4);
- }
-
- /* Wait for consumers to pass the producers */
- i = 0;
- for (cnt = 0; cnt < 50; cnt++) {
- for (; i < NUM_OF_TCS; i++) {
- u16 cons;
-
- cons = (u16)ecore_rd(p_hwfn, p_ptt,
- PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
- if (prod[i] - tc_cons[i] > cons - tc_cons[i])
- break;
-
- cons = (u16)ecore_rd(p_hwfn, p_ptt,
- PRS_REG_MSG_CT_LB_0 + i * 0x4);
- if (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i])
- break;
- }
-
- if (i == NUM_OF_TCS)
- break;
-
- /* 16-bit counters; Delay instead of sleep... */
- OSAL_UDELAY(10);
- }
-
- /* This is only optional polling for BB, since registers are only
- * 16-bit wide and guarantee is not good enough. Don't fail things
- * if polling didn't return the expected results.
- */
- if (cnt == 50)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - prs polling failed on TC %d\n",
- p_vf->abs_vf_id, i);
-
- return ECORE_SUCCESS;
-}
-
static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
struct ecore_ptt *p_ptt)
@@ -2510,10 +3007,6 @@ static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
if (rc)
return rc;
- rc = ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt);
- if (rc)
- return rc;
-
return ECORE_SUCCESS;
}
@@ -2522,8 +3015,8 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id, u32 *ack_vfs)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_vf_info *p_vf;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
if (!p_vf)
@@ -2541,7 +3034,7 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_cleanup(p_hwfn, p_vf);
/* If VF isn't active, no need for anything but SW */
- if (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id))
+ if (!p_vf->b_init)
goto cleanup;
/* TODO - what to do in case of failure? */
@@ -2591,7 +3084,13 @@ enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
- for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ OSAL_MSLEEP(100);
+
+ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
@@ -2607,6 +3106,9 @@ ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+ /* Wait instead of polling the BRB <-> PRS interface */
+ OSAL_MSLEEP(100);
+
ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
@@ -2623,8 +3125,13 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
"[%08x,...,%08x]: %08x\n",
i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+ if (!p_hwfn->p_dev->p_iov_info) {
+ DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
+ return 0;
+ }
+
/* Mark VFs */
- for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) {
+ for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
struct ecore_vf_info *p_vf;
u8 vfid;
@@ -2656,43 +3163,6 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
return found;
}
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
- u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities *p_caps)
-{
- struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
- struct ecore_bulletin_content *p_bulletin;
-
- if (!p_vf)
- return;
-
- p_bulletin = p_vf->bulletin.p_virt;
- p_bulletin->req_autoneg = params->speed.autoneg;
- p_bulletin->req_adv_speed = params->speed.advertised_speeds;
- p_bulletin->req_forced_speed = params->speed.forced_speed;
- p_bulletin->req_autoneg_pause = params->pause.autoneg;
- p_bulletin->req_forced_rx = params->pause.forced_rx;
- p_bulletin->req_forced_tx = params->pause.forced_tx;
- p_bulletin->req_loopback = params->loopback_mode;
-
- p_bulletin->link_up = link->link_up;
- p_bulletin->speed = link->speed;
- p_bulletin->full_duplex = link->full_duplex;
- p_bulletin->autoneg = link->an;
- p_bulletin->autoneg_complete = link->an_complete;
- p_bulletin->parallel_detection = link->parallel_detection;
- p_bulletin->pfc_enabled = link->pfc_enabled;
- p_bulletin->partner_adv_speed = link->partner_adv_speed;
- p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
- p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
- p_bulletin->partner_adv_pause = link->partner_adv_pause;
- p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
- p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
u16 vfid,
struct ecore_mcp_link_params *p_params,
@@ -2720,7 +3190,6 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
{
struct ecore_iov_vf_mbx *mbx;
struct ecore_vf_info *p_vf;
- int i;
p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
@@ -2731,18 +3200,22 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
/* ecore_iov_process_mbx_request */
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
- "ecore_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+ "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
mbx->first_tlv = mbx->req_virt->first_tlv;
+ OSAL_IOV_VF_MSG_TYPE(p_hwfn,
+ p_vf->relative_vf_id,
+ mbx->first_tlv.tl.type);
+
+ /* Lock the per vf op mutex and note the locker's identity.
+ * The unlock will take place in mbx response.
+ */
+ ecore_iov_lock_vf_pf_channel(p_hwfn,
+ p_vf, mbx->first_tlv.tl.type);
+
/* check if tlv type is known */
if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
- /* Lock the per vf op mutex and note the locker's identity.
- * The unlock will take place in mbx response.
- */
- ecore_iov_lock_vf_pf_channel(p_hwfn,
- p_vf, mbx->first_tlv.tl.type);
-
/* switch on the opcode */
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
@@ -2785,10 +3258,6 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
}
-
- ecore_iov_unlock_vf_pf_channel(p_hwfn,
- p_vf, mbx->first_tlv.tl.type);
-
} else {
/* unknown TLV - this may belong to a VF driver from the future
* - a version written after this PF driver was written, which
@@ -2796,54 +3265,78 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
* support them. Or this may be because someone wrote a crappy
* VF driver and is sending garbage over the channel.
*/
- DP_ERR(p_hwfn,
- "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
- mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
-
- for (i = 0; i < 20; i++) {
- DP_VERBOSE(p_hwfn,
- ECORE_MSG_IOV,
- "%x ",
- mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
- }
-
- /* test whether we can respond to the VF (do we have an address
- * for it?)
+ DP_NOTICE(p_hwfn, false,
+ "VF[%02x]: unknown TLV. type %04x length %04x"
+ " padding %08x reply address %lu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding,
+ (unsigned long)mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
*/
- if (p_vf->state == VF_ACQUIRED)
- DP_ERR(p_hwfn, "UNKNOWN TLV Not supported yet\n");
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address))
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Can't respond to TLV -"
+ " no valid reply address\n",
+ p_vf->abs_vf_id);
}
+ ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
+ mbx->first_tlv.tl.type);
+
#ifdef CONFIG_ECORE_SW_CHANNEL
mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
mbx->sw_mbx.response_offset = 0;
#endif
}
+void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ /* TODO - add locking mechanisms [no atomics in ecore, so we can't
+ * add the lock inside the ecore_pf_iov struct].
+ */
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ /* TODO - Take a lock */
+ OSAL_MEMCPY(events, p_pending_events,
+ sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+ OSAL_MEMSET(p_pending_events, 0,
+ sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+}
+
static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
- __le16 vfid,
+ u16 abs_vfid,
struct regpair *vf_msg)
{
+ u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
struct ecore_vf_info *p_vf;
- u8 min, max;
- if (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) {
+ if (!ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Got a message from VF while PF is not initialized for IOV support\n");
+ "Got a message from VF [abs 0x%08x] that cannot be"
+ " handled by PF\n",
+ abs_vfid);
return ECORE_SUCCESS;
}
-
- /* Find the VF record - message comes with realtive [engine] vfid */
- min = (u8)p_hwfn->hw_info.first_vf_in_pf;
- max = min + p_hwfn->p_dev->sriov_info.total_vfs;
- /* @@@TBD - for BE machines, should echo field be reversed? */
- if ((u8)vfid < min || (u8)vfid >= max) {
- DP_INFO(p_hwfn,
- "Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\n",
- (u8)vfid, min, max);
- return ECORE_INVAL;
- }
- p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)vfid - min];
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
/* List the physical address of the request so that handler
* could later on copy the message from it.
@@ -2860,7 +3353,7 @@ enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
- return ecore_sriov_vfpf_msg(p_hwfn, echo,
+ return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
&data->vf_pf_channel.msg_addr);
case COMMON_EVENT_VF_FLR:
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -2879,51 +3372,20 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
(1ULL << (rel_vf_id % 64)));
}
-bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
- bool b_enabled_only)
-{
- if (!p_hwfn->pf_iov_info) {
- DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
- return false;
- }
-
- return b_enabled_only ? ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id) :
- (rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs);
-}
-
-struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
- *p_hwfn,
- u16 relative_vf_id,
- bool b_enabled_only)
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
- struct ecore_vf_info *vf = OSAL_NULL;
-
- vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
- if (!vf)
- return OSAL_NULL;
-
- return &vf->p_vf_info;
-}
-
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
-{
- u64 add_bit = 1ULL << (vfid % 64);
+ struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+ u16 i;
- /* TODO - add locking mechanisms [no atomics in ecore, so we can't
- * add the lock inside the ecore_pf_iov struct].
- */
- p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+ if (!p_iov)
+ goto out;
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
- u64 *events)
-{
- u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+ return i;
- /* TODO - Take a lock */
- OSAL_MEMCPY(events, p_pending_events,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
- OSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+out:
+ return MAX_NUM_VFS;
}
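The pending-events helpers added above use the same idea with 64-bit words: the event handler sets bit vfid % 64 in word vfid / 64, and the consumer copies the whole array into its own buffer and zeroes the source so it can iterate a stable snapshot. A hedged sketch of that producer/consumer pair, leaving out the locking the in-code TODO still calls for:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VF_WORDS 2  /* illustrative; one bit per supported VF */

static uint64_t pending_events[VF_WORDS];

static void add_pending_event(uint8_t vfid)
{
	/* Unlocked on purpose, mirroring the driver's TODO; callers must serialize. */
	pending_events[vfid / 64] |= 1ULL << (vfid % 64);
}

static void get_and_clear_pending_events(uint64_t *events)
{
	memcpy(events, pending_events, sizeof(pending_events));
	memset(pending_events, 0, sizeof(pending_events));
}

int main(void)
{
	uint64_t snapshot[VF_WORDS];

	add_pending_event(3);
	add_pending_event(70);
	get_and_clear_pending_events(snapshot);

	for (unsigned w = 0; w < VF_WORDS; w++)
		for (unsigned b = 0; b < 64; b++)
			if (snapshot[w] & (1ULL << b))
				printf("VF %u has a pending event\n", w * 64 + b);
	return 0;
}
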
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -3004,29 +3466,6 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
- u16 pvid, int vfid)
-{
- struct ecore_vf_info *vf_info;
- u64 feature;
-
- vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
- if (!vf_info) {
- DP_NOTICE(p_hwfn->p_dev, true,
- "Can not set forced MAC, invalid vfid [%d]\n", vfid);
- return;
- }
-
- feature = 1 << VLAN_ADDR_FORCED;
- vf_info->bulletin.p_virt->pvid = pvid;
- if (pvid)
- vf_info->bulletin.p_virt->valid_bitmap |= feature;
- else
- vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
-
- ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
-}
-
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only, int vfid)
@@ -3046,7 +3485,8 @@ ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
*/
if (vf_info->state == VF_ENABLED) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Can't support untagged change for vfid[%d] - VF is already active\n",
+ "Can't support untagged change for vfid[%d] -"
+ " VF is already active\n",
vfid);
return ECORE_INVAL;
}
@@ -3088,6 +3528,30 @@ void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
*p_vort_id = vf_info->vport_id;
}
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct ecore_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set forced MAC, invalid vfid [%d]\n",
+ vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_vf_info *p_vf_info;
@@ -3104,6 +3568,8 @@ bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
struct ecore_vf_info *p_vf_info;
p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return true;
return p_vf_info->state == VF_STOPPED;
}
@@ -3119,21 +3585,11 @@ bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
return vf_info->spoof_chk;
}
-bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
-{
- if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
- !IS_PF_SRIOV_ALLOC(p_hwfn) ||
- !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid))
- return false;
- else
- return true;
-}
-
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
int vfid, bool val)
{
- enum _ecore_status_t rc = ECORE_INVAL;
struct ecore_vf_info *vf;
+ enum _ecore_status_t rc = ECORE_INVAL;
if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn, true,
@@ -3263,8 +3719,8 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
int vfid, int val)
{
struct ecore_vf_info *vf;
- enum _ecore_status_t rc;
u8 abs_vp_id = 0;
+ enum _ecore_status_t rc;
vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
@@ -3275,16 +3731,13 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
-
- return rc;
+ return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate)
{
struct ecore_vf_info *vf;
- enum _ecore_status_t rc;
u8 vport_id;
int i;
@@ -3293,7 +3746,8 @@ enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn, true,
- "SR-IOV sanity check failed, can't set min rate\n");
+ "SR-IOV sanity check failed,"
+ " can't set min rate\n");
return ECORE_INVAL;
}
}
@@ -3301,9 +3755,7 @@ enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
vport_id = vf->vport_id;
- rc = ecore_configure_vport_wfq(p_dev, vport_id, rate);
-
- return rc;
+ return ecore_configure_vport_wfq(p_dev, vport_id, rate);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
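To summarize the mailbox rework in this file: the per-VF channel lock is now taken before the TLV-type check, unknown TLVs get a NOT_SUPPORTED reply when the saved acquire request supplies a matching reply address, and a single unlock covers every path. A simplified sketch of that flow; all names here are hypothetical stand-ins, not the driver's API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum pfvf_status { PFVF_OK = 1, PFVF_NOT_SUPPORTED = 2 };

struct vf_mbx {
	uint16_t tlv_type;
	uint64_t reply_address;         /* taken from the current request */
	uint64_t acquire_reply_address; /* saved from the original ACQUIRE */
};

static void channel_lock(struct vf_mbx *m)   { (void)m; /* per-VF mutex in the driver */ }
static void channel_unlock(struct vf_mbx *m) { (void)m; }
static bool tlv_supported(uint16_t type)     { return type >= 1 && type <= 16; }
static void dispatch(struct vf_mbx *m)       { printf("handling TLV %d\n", m->tlv_type); }
static void reply(struct vf_mbx *m, enum pfvf_status st)
{
	printf("replying %d for TLV %d\n", st, m->tlv_type);
}

static void process_mbx_req(struct vf_mbx *m)
{
	channel_lock(m);

	if (tlv_supported(m->tlv_type)) {
		dispatch(m);
	} else if (m->acquire_reply_address &&
		   m->reply_address == m->acquire_reply_address) {
		/* Unknown TLV, but the reply address checks out - refuse politely. */
		reply(m, PFVF_NOT_SUPPORTED);
	} /* else: no trustworthy reply address, stay silent */

	channel_unlock(m); /* single unlock path, whatever happened above */
}

int main(void)
{
	struct vf_mbx known = { 3, 0x1000, 0x1000 };
	struct vf_mbx unknown = { 999, 0x1000, 0x1000 };

	process_mbx_req(&known);
	process_mbx_req(&unknown);
	return 0;
}
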
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 3471e5c6..ed6ddc49 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -14,8 +14,6 @@
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
-#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
-
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
@@ -33,37 +31,18 @@ struct ecore_vf_mbx_msg {
union pfvf_tlvs resp;
};
-/* This data is held in the ecore_hwfn structure for VFs only. */
-struct ecore_vf_iov {
- union vfpf_tlvs *vf2pf_request;
- dma_addr_t vf2pf_request_phys;
- union pfvf_tlvs *pf2vf_reply;
- dma_addr_t pf2vf_reply_phys;
-
- /* Should be taken whenever the mailbox buffers are accessed */
- osal_mutex_t mutex;
- u8 *offset;
-
- /* Bulletin Board */
- struct ecore_bulletin bulletin;
- struct ecore_bulletin_content bulletin_shadow;
-
- /* we set aside a copy of the acquire response */
- struct pfvf_acquire_resp_tlv acquire_resp;
-};
-
/* This mailbox is maintained per VF in its PF
* contains all information required for sending / receiving
* a message
*/
struct ecore_iov_vf_mbx {
- union vfpf_tlvs *req_virt;
- dma_addr_t req_phys;
- union pfvf_tlvs *reply_virt;
- dma_addr_t reply_phys;
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
/* Address in VF where a pending message is located */
- dma_addr_t pending_req;
+ dma_addr_t pending_req;
u8 *offset;
@@ -72,12 +51,12 @@ struct ecore_iov_vf_mbx {
#endif
/* VF GPA address */
- u32 vf_addr_lo;
- u32 vf_addr_hi;
+ u32 vf_addr_lo;
+ u32 vf_addr_hi;
- struct vfpf_first_tlv first_tlv; /* saved VF request header */
+ struct vfpf_first_tlv first_tlv; /* saved VF request header */
- u8 flags;
+ u8 flags;
#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
* more then one pending msg
*/
@@ -91,21 +70,12 @@ struct ecore_vf_q_info {
u8 txq_active;
};
-enum int_mod {
- VPORT_INT_MOD_UNDEFINED = 0,
- VPORT_INT_MOD_ADAPTIVE = 1,
- VPORT_INT_MOD_OFF = 2,
- VPORT_INT_MOD_LOW = 100,
- VPORT_INT_MOD_MEDIUM = 200,
- VPORT_INT_MOD_HIGH = 300
-};
-
enum vf_state {
- VF_FREE = 0, /* VF ready to be acquired holds no resc */
- VF_ACQUIRED = 1, /* VF, acquired, but not initalized */
- VF_ENABLED = 2, /* VF, Enabled */
- VF_RESET = 3, /* VF, FLR'd, pending cleanup */
- VF_STOPPED = 4 /* VF, Stopped */
+ VF_FREE = 0, /* VF ready to be acquired holds no resc */
+	VF_ACQUIRED	= 1,	/* VF, acquired, but not initialized */
+ VF_ENABLED = 2, /* VF, Enabled */
+ VF_RESET = 3, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED = 4 /* VF, Stopped */
};
struct ecore_vf_vlan_shadow {
@@ -117,6 +87,8 @@ struct ecore_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
u8 inner_vlan_removal;
};
@@ -124,34 +96,37 @@ struct ecore_vf_shadow_config {
struct ecore_vf_info {
struct ecore_iov_vf_mbx vf_mbx;
enum vf_state state;
- u8 to_disable;
+ bool b_init;
+ u8 to_disable;
- struct ecore_bulletin bulletin;
- dma_addr_t vf_bulletin;
+ struct ecore_bulletin bulletin;
+ dma_addr_t vf_bulletin;
- u32 concrete_fid;
- u16 opaque_fid;
- u16 mtu;
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
- u8 vport_id;
- u8 relative_vf_id;
- u8 abs_vf_id;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id)
- u8 vport_instance; /* Number of active vports */
- u8 num_rxqs;
- u8 num_txqs;
+ u8 vport_instance; /* Number of active vports */
+ u8 num_rxqs;
+ u8 num_txqs;
- u8 num_sbs;
+ u8 num_sbs;
- u8 num_mac_filters;
- u8 num_vlan_filters;
- u8 num_mc_filters;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
- struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
- u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
+ struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
/* TODO - Only windows is using it - should be removed */
u8 was_malicious;
@@ -159,7 +134,7 @@ struct ecore_vf_info {
void *ctx;
struct ecore_public_vf_info p_vf_info;
bool spoof_chk; /* Current configured on HW */
- bool req_spoofchk_val; /* Requested value */
+ bool req_spoofchk_val; /* Requested value */
/* Stores the configuration requested by VF */
struct ecore_vf_shadow_config shadow_config;
@@ -176,36 +151,40 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
- struct ecore_vf_info vfs_array[MAX_NUM_VFS];
- u64 pending_events[ECORE_VF_ARRAY_LENGTH];
- u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
- u16 base_vport_id;
+ struct ecore_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_events[ECORE_VF_ARRAY_LENGTH];
+ u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
+ u16 base_vport_id;
+
+#ifndef REMOVE_DBG
+ /* This doesn't serve anything functionally, but it makes windows
+ * debugging of IOV related issues easier.
+ */
+ u64 active_vfs[ECORE_VF_ARRAY_LENGTH];
+#endif
/* Allocate message address continuosuly and split to each VF */
- void *mbx_msg_virt_addr;
- dma_addr_t mbx_msg_phys_addr;
- u32 mbx_msg_size;
- void *mbx_reply_virt_addr;
- dma_addr_t mbx_reply_phys_addr;
- u32 mbx_reply_size;
- void *p_bulletins;
- dma_addr_t bulletins_phys;
- u32 bulletins_size;
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
};
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief Read sriov related information and allocated resources
- * reads from configuraiton space, shmem, and allocates the VF
- * database in the PF.
+ * reads from configuration space, shmem, etc.
*
* @param p_hwfn
- * @param p_ptt
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
@@ -217,8 +196,10 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
*
* @return pointer to the newly placed tlv
*/
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length);
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+ u8 **offset,
+ u16 type,
+ u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
@@ -226,7 +207,8 @@ void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param tlvs_list
*/
-void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list);
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
+ void *tlvs_list);
/**
* @brief ecore_iov_alloc - allocate sriov related resources
@@ -243,7 +225,8 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
*/
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_iov_free - free sriov related resources
@@ -253,6 +236,13 @@ void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void ecore_iov_free(struct ecore_hwfn *p_hwfn);
/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param p_dev
+ */
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
+
+/**
* @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
@@ -260,9 +250,9 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn);
* @param echo
* @param data
*/
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
+enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
union event_ring_data *data);
/**
@@ -274,7 +264,9 @@ enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
*
* @return calculated crc over buffer [with respect to seed].
*/
-u32 ecore_crc32(u32 crc, u8 *ptr, u32 length);
+u32 ecore_crc32(u32 crc,
+ u8 *ptr,
+ u32 length);
/**
* @brief Mark structs of vfs that have been FLR-ed.
@@ -284,7 +276,8 @@ u32 ecore_crc32(u32 crc, u8 *ptr, u32 length);
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
*/
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs);
+int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
@@ -312,79 +305,5 @@ void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
u16 relative_vf_id,
bool b_enabled_only);
-#else
-static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn
- *p_hwfn,
- struct ecore_ptt
- *p_ptt)
-{
- return ECORE_SUCCESS;
-}
-
-static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset,
- u16 type, u16 length)
-{
- return OSAL_NULL;
-}
-
-static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
- void *tlvs_list)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn
- *p_hwfn)
-{
- return ECORE_SUCCESS;
-}
-
-static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
-}
-
-static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn
- *p_hwfn,
- u8 opcode,
- __le16 echo,
- union
- event_ring_data
- * data)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
-{
- return 0;
-}
-
-static OSAL_INLINE int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
- u32 *disabled_vfs)
-{
- return 0;
-}
-
-static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
- void *p_tlvs_list,
- u16 req_type)
-{
- return OSAL_NULL;
-}
-
-static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn
- *p_hwfn,
- u16
- relative_vf_id,
- bool
- b_enabled_only)
-{
- return OSAL_NULL;
-}
-
#endif
#endif /* __ECORE_SRIOV_H__ */
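The header keeps callers free of #ifdefs by pairing every CONFIG_ECORE_SRIOV declaration with a do-nothing inline stub in the other branch: allocators report success, lookups return OSAL_NULL, and event handlers report an error. A minimal sketch of the pattern in plain C, with generic names in place of the ecore ones:

#include <stddef.h>
#include <stdio.h>

enum status { STATUS_SUCCESS = 0, STATUS_INVAL = -3 };

#ifdef CONFIG_SRIOV
enum status iov_alloc(void *hwfn);                 /* real implementation lives in the .c file */
void *iov_get_vf_info(void *hwfn, unsigned id);
#else
/* SR-IOV compiled out: same API, trivially successful or empty results. */
static inline enum status iov_alloc(void *hwfn)
{
	(void)hwfn;
	return STATUS_SUCCESS;
}

static inline void *iov_get_vf_info(void *hwfn, unsigned id)
{
	(void)hwfn; (void)id;
	return NULL;
}
#endif

int main(void)
{
	printf("alloc: %d, vf info: %p\n", iov_alloc(NULL), iov_get_vf_info(NULL, 0));
	return 0;
}
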
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index 98d40bb5..6277bc80 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -10,18 +10,18 @@
#define __ECORE_STATUS_H__
enum _ecore_status_t {
- ECORE_UNKNOWN_ERROR = -12,
- ECORE_NORESOURCES = -11,
- ECORE_NODEV = -10,
+ ECORE_UNKNOWN_ERROR = -12,
+ ECORE_NORESOURCES = -11,
+ ECORE_NODEV = -10,
ECORE_ABORTED = -9,
- ECORE_AGAIN = -8,
+ ECORE_AGAIN = -8,
ECORE_NOTIMPL = -7,
- ECORE_EXISTS = -6,
- ECORE_IO = -5,
+ ECORE_EXISTS = -6,
+ ECORE_IO = -5,
ECORE_TIMEOUT = -4,
- ECORE_INVAL = -3,
- ECORE_BUSY = -2,
- ECORE_NOMEM = -1,
+ ECORE_INVAL = -3,
+ ECORE_BUSY = -2,
+ ECORE_NOMEM = -1,
ECORE_SUCCESS = 0,
/* PENDING is not an error and should be positive */
ECORE_PENDING = 1,
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index d32fb35a..be8b1ec4 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -53,15 +53,27 @@ static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
return p_tlv;
}
+static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
+ enum _ecore_status_t req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
+}
+
static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
u8 *done, u32 resp_size)
{
- struct ustorm_vf_zone *zone_data = (struct ustorm_vf_zone *)
- ((u8 *)PXP_VF_BAR0_START_USDM_ZONE_B);
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
int rc = ECORE_SUCCESS, time = 100;
- u8 pf_id;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
/* output tlvs list */
ecore_dp_tlv_list(p_hwfn, p_req);
@@ -69,25 +81,25 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
/* need to add the END TLV to the message size */
resp_size += sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->sriov_info.b_hw_channel) {
+ if (!p_hwfn->p_dev->b_hw_channel) {
rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
done,
p_req,
p_hwfn->vf_iov_info->pf2vf_reply,
sizeof(union vfpf_tlvs), resp_size);
/* TODO - no prints about message ? */
- goto exit;
+ return rc;
}
/* Send TLVs over HW channel */
OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
- /* TODO - FW should remove this requirement */
- pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
- pf_id,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
+ " %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
&zone_data->non_trigger.vf_pf_msg_addr,
@@ -109,6 +121,9 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
*((u32 *)&trigger));
+ /* When PF would be done with the response, it would write back to the
+ * `done' address. Poll until then.
+ */
while ((!*done) && time) {
OSAL_MSLEEP(25);
time--;
@@ -119,22 +134,41 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = ECORE_TIMEOUT;
- goto exit;
+ return rc;
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"PF response: %d [Type %d]\n",
*done, p_req->first_tlv.tl.type);
}
-exit:
- OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
-
return rc;
}
#define VF_ACQUIRE_THRESH 3
-#define VF_ACQUIRE_MAC_FILTERS 1
-#define VF_ACQUIRE_MC_FILTERS 10
+static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x]"
+ " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
+ " vlan [%02x/%02x] mc [%02x/%02x]."
+ " Try PF recommended amount\n",
+ p_req->num_rxqs, p_resp->num_rxqs,
+		   p_req->num_txqs, p_resp->num_txqs,
+ p_req->num_sbs, p_resp->num_sbs,
+ p_req->num_mac_filters, p_resp->num_mac_filters,
+ p_req->num_vlan_filters, p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+}
static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
@@ -142,26 +176,24 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct ecore_vf_acquire_sw_info vf_sw_info;
- struct vfpf_acquire_tlv *req;
- int rc = 0, attempts = 0;
+ struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
-
- /* @@@ TBD: MichalK take this from somewhere else... */
- u8 rx_count = 1, tx_count = 1, num_sbs = 1;
- u8 num_mac = VF_ACQUIRE_MAC_FILTERS, num_mc = VF_ACQUIRE_MC_FILTERS;
+ struct vfpf_acquire_tlv *req;
+ int attempts = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
- req->resc_request.num_rxqs = rx_count;
- req->resc_request.num_txqs = tx_count;
- req->resc_request.num_sbs = num_sbs;
- req->resc_request.num_mac_filters = num_mac;
- req->resc_request.num_mc_filters = num_mc;
- req->resc_request.num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
@@ -172,9 +204,11 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
req->vfdev_info.fw_minor = FW_MINOR_VERSION;
req->vfdev_info.fw_revision = FW_REVISION_VERSION;
req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
- if (vf_sw_info.override_fw_version)
- req->vfdev_info.capabilties |= VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER;
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
@@ -189,6 +223,10 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"attempting to acquire resources\n");
+ /* Clear response buffer, as this might be a re-send */
+ OSAL_MEMSET(p_iov->pf2vf_reply, 0,
+ sizeof(union pfvf_tlvs));
+
/* send acquire request */
rc = ecore_send_msg2pf(p_hwfn,
&resp->hdr.status, sizeof(*resp));
@@ -203,46 +241,83 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
attempts++;
- /* PF agrees to allocate our resources */
if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ /* It's possible legacy PF mistakenly accepted;
+ * but we don't care - simply mark it as
+ * legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"resources acquired\n");
resources_acquired = true;
} /* PF refuses to allocate our resources */
- else if (resp->hdr.status ==
- PFVF_STATUS_NO_RESOURCE &&
+ else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
attempts < VF_ACQUIRE_THRESH) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "PF unwilling to fullfill resource request. Try PF recommended amount\n");
-
- /* humble our request */
- req->resc_request.num_txqs = resp->resc.num_txqs;
- req->resc_request.num_rxqs = resp->resc.num_rxqs;
- req->resc_request.num_sbs = resp->resc.num_sbs;
- req->resc_request.num_mac_filters =
- resp->resc.num_mac_filters;
- req->resc_request.num_vlan_filters =
- resp->resc.num_vlan_filters;
- req->resc_request.num_mc_filters =
- resp->resc.num_mc_filters;
-
- /* Clear response buffer */
- OSAL_MEMSET(p_iov->pf2vf_reply, 0,
- sizeof(union pfvf_tlvs));
+ ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
+
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn, false,
+ "PF uses an incompatible fastpath HSI"
+ " %02x.%02x [VF requires %02x.%02x]."
+ " Please change to a VF driver using"
+ " %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn, false,
+ "PF uses very old drivers."
+ " Please change to a VF"
+ " driver using no later than"
+ " 8.8.x.x.\n");
+ rc = ECORE_INVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to"
+ " see if it supports FW-version"
+ " override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ }
+ }
} else {
DP_ERR(p_hwfn,
- "PF returned error %d to VF acquisition request\n",
+ "PF returned err %d to VF acquisition request\n",
resp->hdr.status);
- return ECORE_AGAIN;
+ rc = ECORE_AGAIN;
+ goto exit;
}
}
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
if (rc) {
DP_NOTICE(p_hwfn, true,
- "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\n",
+ "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
+ " status = 0x%x.\n",
rc);
- return ECORE_AGAIN;
+ rc = ECORE_AGAIN;
+ goto exit;
}
/* Update bulletin board size with response from PF */
@@ -256,129 +331,127 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
- /* @@@TBD MichalK: Fw ver... */
- /* strlcpy(p_hwfn->fw_ver, p_hwfn->acquire_resp.pfdev_info.fw_ver,
- * sizeof(p_hwfn->fw_ver));
- */
-
p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
- return 0;
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_INFO(p_hwfn, "100g VF\n");
+ p_hwfn->p_dev->num_hwfns = 2;
+ }
+ }
+
+ /* @DPDK */
+ if ((~p_iov->b_pre_fp_hsi &
+ ETH_HSI_VER_MINOR) &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI;"
+ " %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR,
+ resp->pfdev_info.minor_fp_hsi);
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
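The acquire exchange above is a negotiation: the VF first asks for the per-PF maximums, and the PF's status selects the next move - NO_RESOURCE shrinks the request to the granted numbers and retries, NOT_SUPPORTED drives the fastpath-HSI compatibility checks and may retry as a pre-FP-HSI (legacy) client, anything else aborts. A condensed sketch of that loop under simplified, hypothetical types:

#include <stdbool.h>
#include <stdio.h>

enum resp { RESP_SUCCESS, RESP_NO_RESOURCE, RESP_NOT_SUPPORTED, RESP_ERROR };

struct resc { int rxqs, txqs, sbs; };

#define ACQUIRE_THRESH 3

/* Simulated PF: grants at most 4 queues; the real one answers over the mailbox. */
static enum resp send_acquire(const struct resc *req, struct resc *granted, bool legacy)
{
	(void)legacy;
	*granted = *req;
	if (req->rxqs > 4 || req->txqs > 4 || req->sbs > 4) {
		granted->rxqs = granted->txqs = granted->sbs = 4;
		return RESP_NO_RESOURCE;
	}
	return RESP_SUCCESS;
}

static int vf_acquire(struct resc *req)
{
	struct resc granted;
	bool legacy = false;
	int attempts = 0;

	while (attempts++ < ACQUIRE_THRESH) {
		switch (send_acquire(req, &granted, legacy)) {
		case RESP_SUCCESS:
			*req = granted;          /* resources acquired */
			return 0;
		case RESP_NO_RESOURCE:
			*req = granted;          /* humble the request and try again */
			break;
		case RESP_NOT_SUPPORTED:
			legacy = true;           /* retry, announcing pre-FP-HSI support */
			break;
		default:
			return -1;
		}
	}
	return -1;
}

int main(void)
{
	struct resc req = { 16, 16, 16 };

	if (!vf_acquire(&req))
		printf("acquired %d rxqs / %d txqs / %d sbs\n", req.rxqs, req.txqs, req.sbs);
	return 0;
}
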
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_NOMEM;
- struct ecore_vf_iov *p_sriov;
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0]; /* @@@TBD CMT */
+ struct ecore_vf_iov *p_iov;
+ u32 reg;
- p_dev->num_hwfns = 1; /* @@@TBD CMT must be fixed... */
-
- p_hwfn->regview = p_dev->regview;
- if (p_hwfn->regview == OSAL_NULL) {
- DP_ERR(p_hwfn,
- "regview should be initialized before"
- " ecore_vf_hw_prepare is called\n");
- return ECORE_INVAL;
- }
+ /* Set number of hwfns - might be overridden once leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->num_hwfns = 1;
/* Set the doorbell bar. Assumption: regview is set */
p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
PXP_VF_BAR0_START_DQ;
- p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
- PXP_VF_BAR0_ME_OPAQUE_ADDRESS);
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
- p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn,
- PXP_VF_BAR0_ME_CONCRETE_ADDRESS);
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
/* Allocate vf sriov info */
- p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
- if (!p_sriov) {
+ p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
+ if (!p_iov) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
- OSAL_MEMSET(p_sriov, 0, sizeof(*p_sriov));
+ OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));
/* Allocate vf2pf msg */
- p_sriov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &p_sriov->
+ p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->
vf2pf_request_phys,
sizeof(union
vfpf_tlvs));
- if (!p_sriov->vf2pf_request) {
+ if (!p_iov->vf2pf_request) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate `vf2pf_request' DMA memory\n");
- goto free_p_sriov;
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
}
- p_sriov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &p_sriov->
+ p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->
pf2vf_reply_phys,
sizeof(union pfvf_tlvs));
- if (!p_sriov->pf2vf_reply) {
+ if (!p_iov->pf2vf_reply) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `pf2vf_reply' DMA memory\n");
goto free_vf2pf_request;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF's Request mailbox [%p virt 0x%" PRIx64 " phys], "
- "Response mailbox [%p virt 0x%" PRIx64 " phys]\n",
- p_sriov->vf2pf_request,
- (u64)p_sriov->vf2pf_request_phys,
- p_sriov->pf2vf_reply, (u64)p_sriov->pf2vf_reply_phys);
+ "VF's Request mailbox [%p virt 0x%lx phys], "
+ "Response mailbox [%p virt 0x%lx phys]\n",
+ p_iov->vf2pf_request,
+ (unsigned long)p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply,
+ (unsigned long)p_iov->pf2vf_reply_phys);
/* Allocate Bulletin board */
- p_sriov->bulletin.size = sizeof(struct ecore_bulletin_content);
- p_sriov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
- &p_sriov->bulletin.
+ p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
+ p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+ &p_iov->bulletin.
phys,
- p_sriov->bulletin.
+ p_iov->bulletin.
size);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF's bulletin Board [%p virt 0x%" PRIx64 " phys 0x%08x bytes]\n",
- p_sriov->bulletin.p_virt, (u64)p_sriov->bulletin.phys,
- p_sriov->bulletin.size);
+ "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
+ p_iov->bulletin.size);
- OSAL_MUTEX_ALLOC(p_hwfn, &p_sriov->mutex);
- OSAL_MUTEX_INIT(&p_sriov->mutex);
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+ OSAL_MUTEX_INIT(&p_iov->mutex);
- p_hwfn->vf_iov_info = p_sriov;
+ p_hwfn->vf_iov_info = p_iov;
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
- /* First VF needs to query for information from PF */
- if (!p_hwfn->my_id)
- rc = ecore_vf_pf_acquire(p_hwfn);
-
- return rc;
+ return ecore_vf_pf_acquire(p_hwfn);
free_vf2pf_request:
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sriov->vf2pf_request,
- p_sriov->vf2pf_request_phys,
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
sizeof(union vfpf_tlvs));
-free_p_sriov:
- OSAL_FREE(p_hwfn->p_dev, p_sriov);
-
- return rc;
-}
-
-enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn)
-{
- p_hwfn->b_int_enabled = 1;
+free_p_iov:
+ OSAL_FREE(p_hwfn->p_dev, p_iov);
- return 0;
+ return ECORE_NOMEM;
}
-/* TEMP TEMP until in HSI */
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-#define USTORM_QZONE_START(dev) (MSTORM_QZONE_START + \
- (MSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
u8 rx_qid,
@@ -388,54 +461,76 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod)
+ void OSAL_IOMEM **pp_prod)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
int rc;
- u8 hw_qid;
- u64 init_prod_val = 0;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
- /* @@@TBD MichalK TPA */
-
req->rx_qid = rx_qid;
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
req->hw_sb = sb;
req->sb_index = sb_index;
- req->hc_rate = 0; /* @@@TBD MichalK -> host coalescing! */
req->bd_max_bytes = bd_max_bytes;
- req->stat_id = -1; /* No stats at the moment */
-
- /* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
- CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
+ req->stat_id = -1; /* Keep initialized, for future compatibility */
- if (pp_prod) {
- hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ /* If PF is legacy, we'll need to calculate producers ourselves
+ * as well as clean them.
+ */
+ if (pp_prod && p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
- MSTORM_QZONE_START(p_hwfn->p_dev) +
- (hw_qid) * MSTORM_QZONE_SIZE +
- OFFSETOF(struct mstorm_eth_queue_zone, rx_producers);
+ MSTORM_QZONE_START(p_hwfn->p_dev) +
+ (hw_qid) * MSTORM_QZONE_SIZE;
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
}
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+ /* Learn the address of the producer from the response */
+ if (pp_prod && !p_iov->b_pre_fp_hsi) {
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0.
+ * It was actually the PF's responsibility, but since some
+ * old PFs might fail to do so, we do this as well.
+ */
+ OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)&init_prod_val);
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
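The START_RXQ change above handles two PF generations: a legacy (pre-FP-HSI) PF leaves the VF to compute its MSTORM queue-zone producer address from the hardware queue id, while a current PF returns the offset in the queue-start response; in both cases the VF then zeroes the producers. A small sketch of the address selection, with made-up constants in place of the real register map:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only - the real values come from the device's register map. */
#define MSTORM_QZONE_START 0x7000u
#define MSTORM_QZONE_SIZE  0x80u

static uintptr_t rx_prod_addr(uintptr_t regview, uint8_t hw_qid,
			      int pf_is_legacy, uint32_t resp_offset)
{
	if (pf_is_legacy)
		/* Old PFs: derive the producer location ourselves. */
		return regview + MSTORM_QZONE_START + (uintptr_t)hw_qid * MSTORM_QZONE_SIZE;

	/* Modern PFs: trust the offset carried in the queue-start response. */
	return regview + resp_offset;
}

int main(void)
{
	uintptr_t bar0 = 0x100000;

	printf("legacy: 0x%lx, modern: 0x%lx\n",
	       (unsigned long)rx_prod_addr(bar0, 2, 1, 0),
	       (unsigned long)rx_prod_addr(bar0, 2, 0, 0x7100));
	return 0;
}
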
@@ -445,17 +540,12 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
- /* @@@TBD MichalK TPA */
-
- /* @@@TBD MichalK - relevant ???
- * flags VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN
- */
req->rx_qid = rx_qid;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
@@ -465,12 +555,18 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->default_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
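Every request path in this file now funnels through the same tail: on any outcome, jump to a single exit label, log the VF request status against the PF reply status, and release the mailbox mutex there, so the lock taken in the prep helper can never leak. A generic sketch of that shape, with a stubbed lock standing in for the OSAL mutex:

#include <stdio.h>

struct mbx { int locked; int reply_status; };

static void mbx_lock(struct mbx *m)   { m->locked = 1; }
static void mbx_unlock(struct mbx *m) { m->locked = 0; }

/* Hypothetical request: lock in the prep step, always unlock in the shared tail. */
static int send_request(struct mbx *m, int simulate_send_error)
{
	int rc = 0;

	mbx_lock(m);                      /* done by the prep helper in the driver */

	if (simulate_send_error) {
		rc = -1;
		goto exit;                /* no early return may skip the tail */
	}

	if (m->reply_status != 1 /* SUCCESS */)
		rc = -22;

exit:
	printf("request rc = %d, PF reply status = %d\n", rc, m->reply_status);
	mbx_unlock(m);
	return rc;
}

int main(void)
{
	struct mbx m = { 0, 1 };

	send_request(&m, 0);
	send_request(&m, 1);
	return 0;
}
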
@@ -481,18 +577,16 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell)
+ void OSAL_IOMEM **pp_doorbell)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
int rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
- /* @@@TBD MichalK TPA */
-
req->tx_qid = tx_queue_id;
/* Tx */
@@ -500,28 +594,44 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
req->pbl_size = pbl_size;
req->hw_sb = sb;
req->sb_index = sb_index;
- req->hc_rate = 0; /* @@@TBD MichalK -> host coalescing! */
- req->flags = 0; /* @@@TBD MichalK -> flags... */
/* add list termination tlv */
ecore_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->queue_start;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
if (pp_doorbell) {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ tx_queue_id, *pp_doorbell, resp->offset);
}
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
return rc;
}
@@ -529,17 +639,12 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct pfvf_def_resp_tlv *resp;
int rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
- /* @@@TBD MichalK TPA */
-
- /* @@@TBD MichalK - relevant ??? flags
- * VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN
- */
req->tx_qid = tx_qid;
req->num_txqs = 1;
@@ -548,12 +653,18 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->default_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -586,10 +697,15 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -602,7 +718,7 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_vport_start_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct pfvf_def_resp_tlv *resp;
int rc, i;
/* clear mailbox and prep first tlv */
@@ -625,12 +741,18 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->default_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
@@ -652,14 +774,56 @@ enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
+static bool
+ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
+ struct ecore_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+#ifndef ASIC_ONLY
+ /* FPGA doesn't have PVFC and so can't support tx-switching */
+ return !!(p_data->update_tx_switching_flg &&
+ !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
+#else
+ return !!p_data->update_tx_switching_flg;
+#endif
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
+ tlv, ecore_channel_tlvs_string[tlv]);
+ return false;
+ }
+}
+
static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_data)
@@ -668,96 +832,20 @@ ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
struct pfvf_def_resp_tlv *p_resp;
u16 tlv;
- if (p_data->update_vport_active_rx_flg ||
- p_data->update_vport_active_tx_flg) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
- p_resp = (struct pfvf_def_resp_tlv *)
- ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
- if (p_resp && p_resp->hdr.status)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update activate tlv configured\n");
- else
- DP_NOTICE(p_hwfn, true,
- "VP update activate tlv config failed\n");
- }
-
- if (p_data->update_tx_switching_flg) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
- p_resp = (struct pfvf_def_resp_tlv *)
- ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
- if (p_resp && p_resp->hdr.status)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update tx switch tlv configured\n");
-#ifndef ASIC_ONLY
- else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- DP_NOTICE(p_hwfn, false,
- "FPGA: Skip checking whether PF"
- " replied to Tx-switching request\n");
-#endif
- else
- DP_NOTICE(p_hwfn, true,
- "VP update tx switch tlv config failed\n");
- }
-
- if (p_data->update_inner_vlan_removal_flg) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
- p_resp = (struct pfvf_def_resp_tlv *)
- ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
- if (p_resp && p_resp->hdr.status)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update vlan strip tlv configured\n");
- else
- DP_NOTICE(p_hwfn, true,
- "VP update vlan strip tlv config failed\n");
- }
-
- if (p_data->update_approx_mcast_flg) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
- p_resp = (struct pfvf_def_resp_tlv *)
- ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
- if (p_resp && p_resp->hdr.status)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update mcast tlv configured\n");
- else
- DP_NOTICE(p_hwfn, true,
- "VP update mcast tlv config failed\n");
- }
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
+ tlv++) {
+ if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
- if (p_data->accept_flags.update_rx_mode_config ||
- p_data->accept_flags.update_tx_mode_config) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
p_resp = (struct pfvf_def_resp_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
if (p_resp && p_resp->hdr.status)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update accept_mode tlv configured\n");
- else
- DP_NOTICE(p_hwfn, true,
- "VP update accept_mode tlv config failed\n");
- }
-
- if (p_data->rss_params) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- p_resp = (struct pfvf_def_resp_tlv *)
- ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
- if (p_resp && p_resp->hdr.status)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update rss tlv configured\n");
- else
- DP_NOTICE(p_hwfn, true,
- "VP update rss tlv config failed\n");
- }
-
- if (p_data->sge_tpa_params) {
- tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
- p_resp = (struct pfvf_def_resp_tlv *)
- ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
- if (p_resp && p_resp->hdr.status)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VP update sge tpa tlv configured\n");
- else
- DP_NOTICE(p_hwfn, true,
- "VP update sge tpa tlv config failed\n");
+ "TLV[%d] type %s Configuration %s\n",
+ tlv, ecore_channel_tlvs_string[tlv],
+ (p_resp && p_resp->hdr.status) ? "succeeded"
+ : "failed");
}
}
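The response handling above replaces several near-identical per-feature blocks with a single loop: walk every vport-update TLV type, skip the ones the caller did not request (the "is needed" predicate), look up the matching reply TLV, and log one line either way. A compact sketch of that table-driven shape, with invented TLV ids:

#include <stdbool.h>
#include <stdio.h>

enum tlv {
	TLV_ACTIVATE = 1,
	TLV_VLAN_STRIP,
	TLV_MCAST,
	TLV_RSS,
	TLV_MAX
};

struct update_params { bool activate; bool vlan_strip; bool mcast; bool rss; };

static bool tlv_is_needed(const struct update_params *p, enum tlv t)
{
	switch (t) {
	case TLV_ACTIVATE:   return p->activate;
	case TLV_VLAN_STRIP: return p->vlan_strip;
	case TLV_MCAST:      return p->mcast;
	case TLV_RSS:        return p->rss;
	default:             return false;
	}
}

/* Stand-in for searching the PF reply buffer; pretend every requested TLV succeeded. */
static bool reply_ok(enum tlv t) { (void)t; return true; }

static void handle_update_resp(const struct update_params *p)
{
	for (enum tlv t = TLV_ACTIVATE; t < TLV_MAX; t++) {
		if (!tlv_is_needed(p, t))
			continue;
		printf("TLV %d configuration %s\n", t,
		       reply_ok(t) ? "succeeded" : "failed");
	}
}

int main(void)
{
	struct update_params p = { true, false, true, false };

	handle_update_resp(&p);
	return 0;
}
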
@@ -765,15 +853,7 @@ enum _ecore_status_t
ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_params)
{
- struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
- struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
- struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
- struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
- struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
- struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
- struct vfpf_vport_update_activate_tlv *p_act_tlv;
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
- struct vfpf_vport_update_rss_tlv *p_rss_tlv;
struct vfpf_vport_update_tlv *req;
struct pfvf_def_resp_tlv *resp;
u8 update_rx, update_tx;
@@ -792,6 +872,8 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
/* Prepare extended tlvs */
if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
size = sizeof(struct vfpf_vport_update_activate_tlv);
p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
@@ -810,6 +892,8 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
if (p_params->update_inner_vlan_removal_flg) {
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+
size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
@@ -820,6 +904,8 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -830,6 +916,8 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
@@ -845,6 +933,8 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
update_tx = p_params->accept_flags.update_tx_mode_config;
if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
@@ -865,6 +955,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
if (p_params->rss_params) {
struct ecore_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -893,6 +984,8 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -905,9 +998,10 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
if (p_params->sge_tpa_params) {
- struct ecore_sge_tpa_params *sge_tpa_params =
- p_params->sge_tpa_params;
+ struct ecore_sge_tpa_params *sge_tpa_params;
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ sge_tpa_params = p_params->sge_tpa_params;
size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
@@ -953,21 +1047,26 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
return rc;
}
enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
int rc;
/* clear mailbox and prep first tlv */
@@ -978,23 +1077,29 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->default_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_AGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
p_hwfn->b_int_enabled = 0;
- return ECORE_SUCCESS;
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
u32 size;
int rc;
@@ -1006,16 +1111,15 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->default_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
rc = ECORE_AGAIN;
- p_hwfn->b_int_enabled = 0;
+ ecore_vf_pf_req_end(p_hwfn, rc);
- /* TODO - might need to revise this for 100g */
- if (IS_LEAD_HWFN(p_hwfn))
- OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+ p_hwfn->b_int_enabled = 0;
if (p_iov->vf2pf_request)
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -1068,7 +1172,7 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_ucast_filter_tlv *req;
- struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ struct pfvf_def_resp_tlv *resp;
int rc;
/* Sanitize */
@@ -1090,14 +1194,20 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->default_resp;
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_AGAIN;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_AGAIN;
+ goto exit;
+ }
- return ECORE_SUCCESS;
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
}
enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
@@ -1117,21 +1227,40 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
if (rc)
- return rc;
+ goto exit;
- if (resp->hdr.status != PFVF_STATUS_SUCCESS)
- return ECORE_INVAL;
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = ECORE_INVAL;
+ goto exit;
+ }
- return ECORE_SUCCESS;
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
u8 *p_change)
{
- struct ecore_bulletin_content shadow;
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
- u32 crc, crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ struct ecore_bulletin_content shadow;
+ u32 crc, crc_size;
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
*p_change = 0;
/* Need to guarantee PF is not in the middle of writing it */
@@ -1158,18 +1287,6 @@ enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
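
The shadow copy and crc_size bookkeeping above exist because the PF may be rewriting the bulletin while the VF samples it; a CRC computed over everything after the crc field is what proves the snapshot is consistent. A hedged sketch of that check - the crc32 helper name is an assumption here (the base code wraps it behind an OSAL macro), and the real function additionally compares bulletin versions:

/* Illustrative only: report whether a bulletin snapshot is internally
 * consistent, i.e. was not copied while the PF was mid-update.
 */
static bool example_bulletin_snapshot_ok(struct ecore_vf_iov *p_iov,
					 struct ecore_bulletin_content *p_shadow)
{
	u32 crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	u32 crc;

	/* assumed crc32 wrapper; the bulletin CRC is seeded with 0 */
	crc = OSAL_CRC32(0, (u8 *)p_shadow + crc_size,
			 p_iov->bulletin.size - crc_size);

	return crc == p_shadow->crc;
}
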
-u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
-{
- struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
-
- if (!p_iov) {
- DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
- return 0;
- }
-
- return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
-}
-
void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin)
@@ -1265,6 +1382,15 @@ void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
*num_mac = p_vf->acquire_resp.resc.num_mac_filters;
}
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs)
+{
+ struct ecore_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
+}
+
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
struct ecore_bulletin_content *bulletin;
@@ -1317,6 +1443,11 @@ bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
return true;
}
+bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->vf_iov_info->b_pre_fp_hsi;
+}
+
void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
u16 *fw_eng)
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 334b588c..6077d600 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -14,31 +14,42 @@
#include "ecore_l2_api.h"
#include "ecore_vfpf_if.h"
+/* This data is held in the ecore_hwfn structure for VFs only. */
+struct ecore_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ osal_mutex_t mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct ecore_bulletin bulletin;
+ struct ecore_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case PF originates prior to the fp-hsi version comparison,
+ * this has to be propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
+};
+
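
The mutex above is documented as mandatory whenever the shared vf2pf_request/pf2vf_reply buffers are touched. A hedged sketch of honouring that contract, assuming the OSAL_MUTEX_ACQUIRE/OSAL_MUTEX_RELEASE wrappers that pair with the OSAL_MUTEX_DEALLOC call visible earlier in this patch:

/* Illustrative only: any access to the VF<->PF mailbox buffers happens
 * under ecore_vf_iov::mutex, as required by the comment above.
 */
static void example_touch_mailbox(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);

	/* fill the request / read the reply while holding the mutex */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));

	OSAL_MUTEX_RELEASE(&p_iov->mutex);
}
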
#ifdef CONFIG_ECORE_SRIOV
/**
- *
* @brief hw preparation for VF
- * sends ACQUIRE message
- *
- * @param p_dev
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev);
-
-/**
- *
- * @brief VF init in hw (equivalent to hw_init in PF)
- * mark interrupts as enabled
+ * sends ACQUIRE message
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
/**
- *
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
@@ -51,7 +62,7 @@ enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn);
* @param cqe_pbl_addr - physical address of pbl
* @param cqe_pbl_size - pbl size
* @param pp_prod - pointer to the producer to be
- * used in fasthwfn
+ * used in fastpath
*
* @return enum _ecore_status_t
*/
@@ -63,10 +74,9 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod);
+ void OSAL_IOMEM **pp_prod);
/**
- *
* @brief VF - start the TX queue by sending a message to the
* PF.
*
@@ -76,7 +86,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
* @param sb_index - index within the status block
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
- * write the doorbell too..
+ * write the doorbell to.
*
* @return enum _ecore_status_t
*/
@@ -86,10 +96,9 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell);
+ void OSAL_IOMEM **pp_doorbell);
/**
- *
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
@@ -98,11 +107,11 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion);
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ u16 rx_qid,
+ bool cqe_completion);
/**
- *
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
@@ -110,9 +119,10 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_qid);
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ u16 tx_qid);
+#ifndef LINUX_REMOVE
/**
* @brief VF - update the RX queue by sending a message to the
* PF
@@ -126,14 +136,15 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- u8 num_rxqs,
- u8 comp_cqe_flg,
- u8 comp_event_flg);
+enum _ecore_status_t ecore_vf_pf_rxqs_update(
+ struct ecore_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
+#endif
/**
- *
* @brief VF - send a vport update command
*
* @param p_hwfn
@@ -146,7 +157,6 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_params);
/**
- *
* @brief VF - send a close message to PF
*
* @param p_hwfn
@@ -156,7 +166,6 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
/**
- *
* @brief VF - free vf`s memories
*
* @param p_hwfn
@@ -166,7 +175,6 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
/**
- *
* @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
* sb_id. For VFs igu sbs don't have to be contiguous
*
@@ -175,7 +183,9 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
*
* @return INLINE u16
*/
-u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 sb_id);
+
/**
* @brief ecore_vf_pf_vport_start - perform vport start for VF.
@@ -190,13 +200,14 @@ u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
*
* @return enum _ecore_status
*/
-enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
- u8 vport_id,
- u16 mtu,
- u8 inner_vlan_removal,
- enum ecore_tpa_mode tpa_mode,
- u8 max_buffers_per_cqe,
- u8 only_untagged);
+enum _ecore_status_t ecore_vf_pf_vport_start(
+ struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum ecore_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged);
/**
* @brief ecore_vf_pf_vport_stop - stop the VF's vport
@@ -207,9 +218,9 @@ enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
-enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
- struct ecore_filter_ucast
- *p_param);
+enum _ecore_status_t ecore_vf_pf_filter_ucast(
+ struct ecore_hwfn *p_hwfn,
+ struct ecore_filter_ucast *p_param);
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_mcast *p_filter_cmd);
@@ -256,160 +267,5 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
-#else
-static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev
- *p_dev)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn
- *p_hwfn)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn
- *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t
- bd_chain_phys_adr,
- dma_addr_t
- cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM *
- *pp_prod)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn
- *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t
- pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM *
- *pp_doorbell)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn
- *p_hwfn,
- u16 rx_qid,
- bool
- cqe_completion)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn
- *p_hwfn,
- u16 tx_qid)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct
- ecore_hwfn
- * p_hwfn,
- u16 rx_queue_id,
- u8 num_rxqs,
- u8 comp_cqe_flg,
- u8
- comp_event_flg)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(
- struct ecore_hwfn *p_hwfn,
- struct ecore_sp_vport_update_params *p_params)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn
- *p_hwfn)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn
- *p_hwfn)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
- u16 sb_id)
-{
- return 0;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(
- struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu,
- u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode,
- u8 max_buffers_per_cqe, u8 only_untagged)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(
- struct ecore_hwfn *p_hwfn)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(
- struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
- struct ecore_filter_mcast
- *p_filter_cmd)
-{
-}
-
-static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct
- ecore_hwfn
- * p_hwfn)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params
- *p_params,
- struct ecore_bulletin_content
- *p_bulletin)
-{
-}
-
-static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state
- *p_link,
- struct ecore_bulletin_content
- *p_bulletin)
-{
-}
-
-static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct
- ecore_mcp_link_capabilities
- * p_link_caps,
- struct ecore_bulletin_content
- *p_bulletin)
-{
-}
#endif
-
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index f28b6860..571fd374 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -57,7 +57,8 @@ void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param num_rxqs - allocated RX queues
*/
-void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs);
+void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_rxqs);
/**
* @brief Get port mac address for VF
@@ -65,7 +66,8 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs);
* @param p_hwfn
* @param port_mac - destination location for port mac
*/
-void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac);
+void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
+ u8 *port_mac);
/**
* @brief Get number of VLAN filters allocated for VF by ecore
@@ -79,12 +81,15 @@ void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
/**
* @brief Get number of MAC filters allocated for VF by ecore
*
- * @param p_hwfn
- * @param num_mac - allocated MAC filters
+ * @param p_hwfn
+ * @param num_mac_filters - allocated MAC filters
*/
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u32 *num_mac_filters);
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs);
+
/**
* @brief Check if VF can set a MAC address
*
@@ -95,13 +100,14 @@ void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
*/
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
+#ifndef LINUX_REMOVE
/**
* @brief Copy forced MAC address from bulletin board
*
* @param hwfn
* @param dst_mac
* @param p_is_forced - out param which indicate in case mac
- * exist if it forced or not.
+ * exist if it forced or not.
*
* @return bool - return true if mac exist and false if
* not.
@@ -120,8 +126,20 @@ bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
/**
- * @brief Set firmware version information in dev_info from VFs acquire response
- * tlv
+ * @brief Check if VF is based on PF whose driver is pre-fp-hsi version;
+ * This affects the fastpath implementation of the driver.
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true iff PF is pre-fp-hsi version.
+ */
+bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn);
+
+#endif
+
+/**
+ * @brief Set firmware version information in dev_info from VFs acquire
+ * response tlv
*
* @param p_hwfn
* @param fw_major
@@ -131,70 +149,8 @@ bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
*/
void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_major,
- u16 *fw_minor, u16 *fw_rev, u16 *fw_eng);
-#else
-static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn
- *p_hwfn,
- u8 *p_change)
-{
- return ECORE_INVAL;
-}
-
-static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params
- *params)
-{
-}
-
-static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state
- *link)
-{
-}
-
-static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct
- ecore_mcp_link_capabilities
- * p_link_caps)
-{
-}
-
-static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
- u8 *num_rxqs)
-{
-}
-
-static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
- u8 *port_mac)
-{
-}
-
-static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
- u8 *num_vlan_filters)
-{
-}
-
-static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
- u32 *num_mac)
-{
-}
-
-static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
-{
- return false;
-}
-
-static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn
- *hwfn, u8 *dst_mac,
- u8 *p_is_forced)
-{
- return false;
-}
-
-static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
- u16 *fw_major, u16 *fw_minor,
- u16 *fw_rev, u16 *fw_eng)
-{
-}
+ u16 *fw_minor,
+ u16 *fw_rev,
+ u16 *fw_eng);
#endif
#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 2fa4d155..149d092b 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -9,11 +9,9 @@
#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__
+/* @@@ TBD MichalK this should be HSI? */
#define T_ETH_INDIRECTION_TABLE_SIZE 128
-#define T_ETH_RSS_KEY_SIZE 10
-#ifndef aligned_u64
-#define aligned_u64 u64
-#endif
+#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
/***********************************************
*
@@ -21,18 +19,18 @@
*
**/
struct vf_pf_resc_request {
- u8 num_rxqs;
- u8 num_txqs;
- u8 num_sbs;
- u8 num_mac_filters;
- u8 num_vlan_filters;
- u8 num_mc_filters; /* No limit so superfluous */
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
u16 padding;
};
struct hw_sb_info {
- u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
- u8 sb_qid; /* used to update DHC for sb */
+ u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
+ u8 sb_qid; /* used to update DHC for sb */
u8 padding[5];
};
@@ -44,29 +42,6 @@ struct hw_sb_info {
*
**/
#define TLV_BUFFER_SIZE 1024
-#define TLV_ALIGN sizeof(u64)
-#define PF_VF_BULLETIN_SIZE 512
-
-#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000
-#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001
-#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002
-#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004
-#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008
-#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010
-/* TODO: #define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020 */
-
-#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content))
-#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */
-#define BULLETIN_CRC_SEED 0
-
-enum {
- PFVF_STATUS_WAITING = 0,
- PFVF_STATUS_SUCCESS,
- PFVF_STATUS_FAILURE,
- PFVF_STATUS_NOT_SUPPORTED,
- PFVF_STATUS_NO_RESOURCE,
- PFVF_STATUS_FORCED,
-};
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
@@ -81,7 +56,7 @@ struct channel_tlv {
struct vfpf_first_tlv {
struct channel_tlv tl;
u32 padding;
- aligned_u64 reply_address;
+ u64 reply_address;
};
/* header of pf->vf tlvs, carries the status of handling the request */
@@ -107,38 +82,49 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER (1 << 0)
- aligned_u64 capabilties;
+#ifndef LINUX_REMOVE
+ /* First bit was used on 8.7.x and 8.8.x versions, which had different
+ * FWs but shared the same fastpath HSI. As this was prior to the
+ * fastpath versioning, we wanted the ability to override fw matching
+ * and allow them to interact.
+ */
+#endif
+/* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ u64 capabilities;
u8 fw_major;
u8 fw_minor;
u8 fw_revision;
u8 fw_engineering;
u32 driver_version;
- u16 opaque_fid; /* ME register value */
- u8 os_type; /* VFPF_ACQUIRE_OS_* value */
- u8 padding[5];
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
} vfdev_info;
struct vf_pf_resc_request resc_request;
- aligned_u64 bulletin_addr;
+ u64 bulletin_addr;
u32 bulletin_size;
u32 padding;
};
/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
- struct channel_tlv tl;
+ struct channel_tlv tl;
u8 update_rss_flags;
-#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
-#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
-#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
-#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
+ #define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
+ #define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
+ #define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
+ #define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
u8 rss_enable;
u8 rss_caps;
- u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
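
As the comment on rss_table_size_log above says, the indirection table holds 2 ^ rss_table_size_log entries; the fixed-size rss_ind_table[] array bounds how many of them are meaningful. A small illustrative helper (the function name is made up):

/* Illustrative only: number of valid rss_ind_table[] entries implied by
 * rss_table_size_log (table size is 2 ^ rss_table_size_log), clamped to
 * the fixed array size carried by the TLV.
 */
static u16 example_rss_table_entries(const struct vfpf_vport_update_rss_tlv *p_rss)
{
	u32 entries = 1U << p_rss->rss_table_size_log;

	if (entries > T_ETH_INDIRECTION_TABLE_SIZE)
		entries = T_ETH_INDIRECTION_TABLE_SIZE;

	return (u16)entries;
}
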
@@ -168,14 +154,27 @@ struct pfvf_acquire_resp_tlv {
u16 fw_rev;
u16 fw_eng;
- aligned_u64 capabilities;
+ u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0)
+#define PFVF_ACQUIRE_CAP_100G (1 << 1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point and the new
+ * VFs would fail probe on the older PFs that fail to do so.
+ */
+#ifndef LINUX_REMOVE
+/* Said bug was in quest/serpens; Can't be certain no official release included
+ * the bug since the fix arrived very late in the programs.
+ */
+#endif
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
u16 db_size;
- u8 indices_per_sb;
+ u8 indices_per_sb;
u8 os_type;
- /* Thesee should match the PF's ecore_dev values */
+ /* These should match the PF's ecore_dev values */
u16 chip_rev;
u8 dev_type;
@@ -184,7 +183,14 @@ struct pfvf_acquire_resp_tlv {
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
- u8 padding2[2];
+
+ /* It's possible PF had to configure an older fastpath HSI
+ * [in case VF is newer than PF]. This is communicated back
+ * to the VF. It can also be used in case of error due to
+ * non-matching versions, to give the VF insight into the failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
} pfdev_info;
struct pf_vf_resc {
@@ -192,174 +198,168 @@ struct pfvf_acquire_resp_tlv {
* this struct with suggested amount of resources for next
* acquire request
*/
-#define PFVF_MAX_QUEUES_PER_VF 16
-#define PFVF_MAX_SBS_PER_VF 16
+ #define PFVF_MAX_QUEUES_PER_VF 16
+ #define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
- u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
- u8 cid[PFVF_MAX_QUEUES_PER_VF];
-
- u8 num_rxqs;
- u8 num_txqs;
- u8 num_sbs;
- u8 num_mac_filters;
- u8 num_vlan_filters;
- u8 num_mc_filters;
- u8 padding[2];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
} resc;
u32 bulletin_size;
u32 padding;
};
-/* Init VF */
-struct vfpf_init_tlv {
- struct vfpf_first_tlv first_tlv;
- aligned_u64 stats_addr;
-
- u16 rx_mask;
- u16 tx_mask;
- u8 drop_ttl0_flg;
- u8 padding[3];
-
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
};
/* Setup Queue */
struct vfpf_start_rxq_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
/* physical addresses */
- aligned_u64 rxq_addr;
- aligned_u64 deprecated_sge_addr;
- aligned_u64 cqe_pbl_addr;
-
- u16 cqe_pbl_size;
- u16 hw_sb;
- u16 rx_qid;
- u16 hc_rate; /* desired interrupts per sec. */
-
- u16 bd_max_bytes;
- u16 stat_id;
- u8 sb_index;
- u8 padding[3];
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
};
struct vfpf_start_txq_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
/* physical addresses */
- aligned_u64 pbl_addr;
- u16 pbl_size;
- u16 stat_id;
- u16 tx_qid;
- u16 hw_sb;
-
- u32 flags; /* VFPF_QUEUE_FLG_X flags */
- u16 hc_rate; /* desired interrupts per sec. */
- u8 sb_index;
- u8 padding[3];
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
};
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
- u16 rx_qid;
- u8 num_rxqs;
- u8 cqe_completion;
- u8 padding[4];
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
};
/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
- u16 tx_qid;
- u8 num_txqs;
- u8 padding[5];
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
};
struct vfpf_update_rxq_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
- aligned_u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
- u16 rx_qid;
- u8 num_rxqs;
- u8 flags;
-#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
-#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
-#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+ #define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
+ #define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
+ #define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
- u8 padding[4];
+ u8 padding[4];
};
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
u32 flags;
-#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
-#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
-#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+ #define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+ #define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+ #define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
- u8 mac[ETH_ALEN];
+ u8 mac[ETH_ALEN];
u16 vlan_tag;
- u8 padding[4];
+ u8 padding[4];
};
/* Start a vport */
struct vfpf_vport_start_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
- aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
- u32 tpa_mode;
- u16 dep1;
- u16 mtu;
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
- u8 vport_id;
- u8 inner_vlan_removal;
+ u8 vport_id;
+ u8 inner_vlan_removal;
- u8 only_untagged;
- u8 max_buffers_per_cqe;
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
- u8 padding[4];
+ u8 padding[4];
};
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
- struct channel_tlv tl;
- u8 update_rx;
- u8 update_tx;
- u8 active_rx;
- u8 active_tx;
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
};
struct vfpf_vport_update_tx_switch_tlv {
- struct channel_tlv tl;
- u8 tx_switching;
- u8 padding[3];
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
};
struct vfpf_vport_update_vlan_strip_tlv {
- struct channel_tlv tl;
- u8 remove_vlan;
- u8 padding[3];
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
};
struct vfpf_vport_update_mcast_bin_tlv {
- struct channel_tlv tl;
- u8 padding[4];
+ struct channel_tlv tl;
+ u8 padding[4];
- aligned_u64 bins[8];
+ u64 bins[8];
};
struct vfpf_vport_update_accept_param_tlv {
struct channel_tlv tl;
- u8 update_rx_mode;
- u8 update_tx_mode;
- u8 rx_accept_filter;
- u8 tx_accept_filter;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
};
struct vfpf_vport_update_accept_any_vlan_tlv {
@@ -371,29 +371,29 @@ struct vfpf_vport_update_accept_any_vlan_tlv {
};
struct vfpf_vport_update_sge_tpa_tlv {
- struct channel_tlv tl;
+ struct channel_tlv tl;
- u16 sge_tpa_flags;
-#define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
-#define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
-#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
-#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
-#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
+ u16 sge_tpa_flags;
+ #define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
+ #define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
+ #define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
+ #define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
+ #define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
- u8 update_sge_tpa_flags;
-#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
-#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
-#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
+ u8 update_sge_tpa_flags;
+ #define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
+ #define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
+ #define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
- u8 max_buffers_per_cqe;
+ u8 max_buffers_per_cqe;
- u16 deprecated_sge_buff_size;
- u16 tpa_max_size;
- u16 tpa_min_size_to_start;
- u16 tpa_min_size_to_cont;
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
- u8 tpa_max_aggs_num;
- u8 padding[7];
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
};
@@ -405,15 +405,15 @@ struct vfpf_vport_update_tlv {
};
struct vfpf_ucast_filter_tlv {
- struct vfpf_first_tlv first_tlv;
+ struct vfpf_first_tlv first_tlv;
- u8 opcode;
- u8 type;
+ u8 opcode;
+ u8 type;
- u8 mac[ETH_ALEN];
+ u8 mac[ETH_ALEN];
- u16 vlan;
- u16 padding[3];
+ u16 vlan;
+ u16 padding[3];
};
struct tlv_buffer_size {
@@ -421,26 +421,24 @@ struct tlv_buffer_size {
};
union vfpf_tlvs {
- struct vfpf_first_tlv first_tlv;
- struct vfpf_acquire_tlv acquire;
- struct vfpf_init_tlv init;
- struct vfpf_start_rxq_tlv start_rxq;
- struct vfpf_start_txq_tlv start_txq;
- struct vfpf_stop_rxqs_tlv stop_rxqs;
- struct vfpf_stop_txqs_tlv stop_txqs;
- struct vfpf_update_rxq_tlv update_rxq;
- struct vfpf_vport_start_tlv start_vport;
- struct vfpf_vport_update_tlv vport_update;
- struct vfpf_ucast_filter_tlv ucast_filter;
- struct channel_list_end_tlv list_end;
- struct tlv_buffer_size tlv_buf_size;
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
- struct pfvf_def_resp_tlv default_resp;
- struct pfvf_acquire_resp_tlv acquire_resp;
- struct channel_list_end_tlv list_end;
- struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -469,20 +467,19 @@ enum ecore_bulletin_bit {
};
struct ecore_bulletin_content {
- u32 crc; /* crc of structure to ensure is not in
- * mid-update
- */
+ /* crc of structure to ensure is not in mid-update */
+ u32 crc;
+
u32 version;
- aligned_u64 valid_bitmap; /* bitmap indicating wich fields
- * hold valid values
- */
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
- u8 mac[ETH_ALEN]; /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
- u8 default_only_untagged; /* If valid, 1 => only untagged Rx
- * if no vlan filter is configured.
- */
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
u8 padding;
/* The following is a 'copy' of ecore_mcp_link_state,
@@ -529,11 +526,10 @@ struct ecore_bulletin {
u32 size;
};
-#ifndef print_enum
enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
- CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_VPORT_START,
CHANNEL_TLV_VPORT_UPDATE,
@@ -556,35 +552,15 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
- CHANNEL_TLV_MAX
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+ * Will break in case of non-sequential vport-update tlvs.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
extern const char *ecore_channel_tlvs_string[];
-#else
-print_enum(channel_tlvs, CHANNEL_TLV_NONE, /* ends tlv sequence */
- CHANNEL_TLV_ACQUIRE,
- CHANNEL_TLV_VPORT_START,
- CHANNEL_TLV_VPORT_UPDATE,
- CHANNEL_TLV_VPORT_TEARDOWN,
- CHANNEL_TLV_SETUP_RXQ,
- CHANNEL_TLV_SETUP_TXQ,
- CHANNEL_TLV_STOP_RXQS,
- CHANNEL_TLV_STOP_TXQS,
- CHANNEL_TLV_UPDATE_RXQ,
- CHANNEL_TLV_INT_CLEANUP,
- CHANNEL_TLV_CLOSE,
- CHANNEL_TLV_RELEASE,
- CHANNEL_TLV_LIST_END,
- CHANNEL_TLV_UCAST_FILTER,
- CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
- CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
- CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
- CHANNEL_TLV_VPORT_UPDATE_MCAST,
- CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
- CHANNEL_TLV_VPORT_UPDATE_RSS,
- CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
- CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX);
-#endif
-
#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 046bbb20..32130709 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -11,60 +11,103 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
+
+/* FP HSI version. FP HSI is compatible if (fwVer.major == drvVer.major &&
+ * fwVer.minor >= drvVer.minor)
+ */
+/* ETH FP HSI Major version */
+#define ETH_HSI_VER_MAJOR 3
+/* ETH FP HSI Minor version */
+#define ETH_HSI_VER_MINOR 10
+
+/* Alias for 8.7.x.x/8.8.x.x ETH FP HSI MINOR version. In this version driver
+ * is not required to set pkt_len field in eth_tx_1st_bd struct, and tunneling
+ * offload is not supported.
+ */
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
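
The comment above states the FP HSI compatibility rule: major versions must match exactly and the firmware minor must be at least the driver minor. A hedged sketch of that check (the function name is hypothetical; only the rule and the ETH_HSI_VER_* constants come from this header):

/* Illustrative only: FP HSI is compatible if
 * (fwVer.major == drvVer.major && fwVer.minor >= drvVer.minor),
 * where the driver's version is ETH_HSI_VER_MAJOR/ETH_HSI_VER_MINOR.
 */
static bool example_eth_fp_hsi_compatible(u8 fw_major, u8 fw_minor)
{
	return fw_major == ETH_HSI_VER_MAJOR &&
	       fw_minor >= ETH_HSI_VER_MINOR;
}
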
+
#define ETH_CACHE_LINE_SIZE 64
-#define ETH_RX_CQE_GAP 32
-#define ETH_MAX_RAMROD_PER_CON 8
-#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
-#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
-#define ETH_RX_NUM_NEXT_PAGE_BDS 2
-
-#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
-#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
-#define ETH_TX_MAX_LSO_HDR_NBD 4
-#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
-#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
-#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
-#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
-#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8))
+#define ETH_RX_CQE_GAP 32
+#define ETH_MAX_RAMROD_PER_CON 8
+#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
+#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
+#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+
+#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
+#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
+#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
+#define ETH_TX_MAX_LSO_HDR_NBD 4
+#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
+#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
+#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+/* (QM_REG_TASKBYTECRDCOST_0, QM_VOQ_BYTE_CRD_TASK_COST) -
+ * (VLAN-TAG + CRC + IPG + PREAMBLE)
+ */
+#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
-#define ETH_TX_LSO_WINDOW_BDS_NUM 18
+/* Number of BDs to consider for LSO sliding window restriction is
+ * (ETH_TX_LSO_WINDOW_BDS_NUM - hdr_nbd)
+ */
+#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
+/* Minimum data length (in bytes) in LSO sliding window */
#define ETH_TX_LSO_WINDOW_MIN_LEN 9700
-#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFFFF
-
-#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-
+/* Maximum LSO packet TCP payload length (in bytes) */
+#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000
+/* Number of same-as-last resources in tx switching */
+#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320
+/* Value for a connection for which same as last feature is disabled */
+#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF
+
+/* Maximum number of statistics counters */
+#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+/* Maximum number of statistics counters when doubled VF zone used */
+#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2)
+/* Maximum number of statistics counters when quad VF zone used */
+#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
+ (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
+
+/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
/* num of MAC/VLAN filters */
-#define ETH_NUM_MAC_FILTERS 512
-#define ETH_NUM_VLAN_FILTERS 512
+#define ETH_NUM_MAC_FILTERS 512
+#define ETH_NUM_VLAN_FILTERS 512
/* approx. multicast constants */
-#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
-#define ETH_MULTICAST_MAC_BINS 256
-#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
+/* CRC seed for multicast bin calculation */
+#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
+#define ETH_MULTICAST_MAC_BINS 256
+#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
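
The constants above describe the approximate multicast filter: 256 hash bins (ETH_MULTICAST_MAC_BINS) exposed to hardware as 32-bit register words, and carried to the PF as the u64 bins[] bitmap in the vport-update mcast TLV earlier in this patch. A hedged sketch of marking one bin, assuming the bin index was already derived from the MAC (the CRC-based mapping itself is outside this diff):

/* Illustrative only: set one multicast bin in a u64 bitmap such as
 * vfpf_vport_update_mcast_bin_tlv::bins. With 256 bins only the first
 * four 64-bit words are ever touched.
 */
static void example_set_mcast_bin(u64 *bins, u16 bin)
{
	if (bin >= ETH_MULTICAST_MAC_BINS)
		return;

	bins[bin / 64] |= 1ULL << (bin % 64);
}
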
/* ethernet vport update constants */
-#define ETH_FILTER_RULES_COUNT 10
-#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
-#define ETH_RSS_KEY_SIZE_REGS 10
+#define ETH_FILTER_RULES_COUNT 10
+/* number of RSS indirection table entries, per Vport */
+#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
+/* Length of RSS key (in regs) */
+#define ETH_RSS_KEY_SIZE_REGS 10
+/* number of available RSS engines in K2 */
#define ETH_RSS_ENGINE_NUM_K2 207
+/* number of available RSS engines in BB */
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
+/* Maximum number of open TPA aggregations */
#define ETH_TPA_MAX_AGGS_NUM 64
+/* Maximum number of additional buffers, reported by TPA-start CQE */
#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
+/* Maximum number of buffers, reported by TPA-continue CQE */
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+/* Maximum number of buffers, reported by TPA-end CQE */
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
-/*
- * Interrupt coalescing TimeSet
- */
-struct coalescing_timeset {
- u8 timeset;
- u8 valid /* Only if this flag is set, timeset will take effect */;
-};
+/* Control frame check constants */
+/* Number of etherType values configured by driver for control frame check */
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+
+
/*
* Destination port mode
@@ -77,6 +120,7 @@ enum dest_port_mode {
MAX_DEST_PORT_MODE
};
+
/*
* Ethernet address type
*/
@@ -88,22 +132,31 @@ enum eth_addr_type {
MAX_ETH_ADDR_TYPE
};
+
struct eth_tx_1st_bd_flags {
u8 bitfields;
+/* Set to 1 in the first BD. (for debug) */
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
+/* Do not allow additional VLAN manipulations on this packet. */
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
+/* IP checksum recalculation is needed */
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
+/* TCP/UDP checksum recalculation is needed */
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
+/* If set, need to add the VLAN in vlan field to the packet. */
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
+/* If set, this is an LSO packet. */
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
+/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
+/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
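
Every field above follows the same MASK/SHIFT convention: the value, masked to its width, is placed at the named shift inside the bitfields byte. A hedged example of composing eth_tx_1st_bd_flags for an LSO packet with L4 checksum offload (the helper name and field choice are illustrative):

/* Illustrative only: build eth_tx_1st_bd_flags::bitfields using the
 * MASK/SHIFT pairs defined above.
 */
static void example_fill_1st_bd_flags(struct eth_tx_1st_bd_flags *p_flags)
{
	u8 bits = 0;

	bits |= (1 & ETH_TX_1ST_BD_FLAGS_START_BD_MASK) <<
		ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	bits |= (1 & ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK) <<
		ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
	bits |= (1 & ETH_TX_1ST_BD_FLAGS_LSO_MASK) <<
		ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;

	p_flags->bitfields = bits;
}
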
@@ -112,48 +165,71 @@ struct eth_tx_1st_bd_flags {
* The parsing information data for the first tx bd of a given packet.
*/
struct eth_tx_data_1st_bd {
- __le16 vlan /* VLAN to insert to packet (if needed). */;
- /* Number of BDs in packet. Should be at least 2 in non-LSO
- * packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet.
- */
+ __le16 vlan /* VLAN tag to insert to packet (if needed). */;
+/* Number of BDs in packet. Should be at least 2 in non-LSO packet and at least
+ * 3 in LSO (or Tunnel with IPv6+ext) packet.
+ */
u8 nbds;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0
-#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
-#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF
-#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2
+/* Indicates a tunneled packet. Must be set for encapsulated packet. */
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
+#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
+#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
+/* Total packet length - must be filled for non-LSO packets. */
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF
+#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
/*
* The parsing information data for the second tx bd of a given packet.
*/
struct eth_tx_data_2nd_bd {
+/* For tunnel with IPv6+ext - Tunnel header IP datagram length (in BYTEs) */
__le16 tunn_ip_size;
__le16 bitfields1;
+/* For Tunnel header with IPv6 ext. - Inner L2 Header Size (in 2-byte WORDs) */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
+/* For Tunnel header with IPv6 ext. - Inner L2 Header MAC DA Type
+ * (use enum eth_addr_type)
+ */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
+/* Destination port mode. (use enum dest_port_mode) */
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+/* Should be 0 in all the BDs, except the first one. (for debug) */
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
+/* For Tunnel header with IPv6 ext. - Tunnel Type (use enum eth_tx_tunn_type) */
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
+/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
+/* For LSO / Tunnel header with IPv6+ext - Set if outer header has IPv6+ext */
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
+/* Set if Tunnel header has IPv6 ext. (3rd BD is required) */
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
+/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
+ * inner or outer Ipv6+ext) and l4_csum is set)
+ */
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
+/* The pseudo header checksum type in the L4 checksum field. Required when
+ * IPv6+ext and l4_csum is set. (use enum eth_l4_pseudo_checksum_mode)
+ */
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
+/* For inner/outer header IPv6+ext - (inner) L4 header offset (in 2-byte WORDs).
+ * For regular packet - offset from the beginning of the packet. For tunneled
+ * packet - offset from the beginning of the inner header
+ */
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
@@ -164,36 +240,63 @@ struct eth_tx_data_2nd_bd {
* Firmware data for L2-EDPM packet.
*/
struct eth_edpm_fw_data {
- struct eth_tx_data_1st_bd data_1st_bd
- /* Parsing information data from the 1st BD. */;
- struct eth_tx_data_2nd_bd data_2nd_bd
- /* Parsing information data from the 2nd BD. */;
+/* Parsing information data from the 1st BD. */
+ struct eth_tx_data_1st_bd data_1st_bd;
+/* Parsing information data from the 2nd BD. */
+ struct eth_tx_data_2nd_bd data_2nd_bd;
__le32 reserved;
};
+
/*
* FW debug.
*/
struct eth_fast_path_cqe_fw_debug {
- u8 reserved0 /* FW reserved. */;
- u8 reserved1 /* FW reserved. */;
__le16 reserved2 /* FW reserved. */;
};
-struct tunnel_parsing_flags {
+
+/*
+ * tunneling parsing flags
+ */
+struct eth_tunnel_parsing_flags {
u8 flags;
-#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
-#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
-#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
-#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+/* 0 - no tunneling, 1 - GENEVE, 2 - GRE, 3 - VXLAN
+ * (use enum eth_rx_tunn_type)
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
+/* If it's not an encapsulated packet then put 0x0. If it's an encapsulated
+ * packet but the tenant-id doesn't exist then put 0x0. Else put 0x1.
+ *
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
+/* Type of the next header above the tunneling: 0 - unknown, 1 - L2, 2 - Ipv4,
+ * 3 - IPv6 (use enum tunnel_next_protocol)
+ */
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3
+#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
+/* The result of comparing the DA-ip of the tunnel header. */
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
+#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
+};
+
+/*
+ * PMD flow control bits
+ */
+struct eth_pmd_flow_flags {
+ u8 flags;
+#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 /* CQE valid bit */
+#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 /* CQE ring toggle bit */
+#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
+#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
+#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
/*
@@ -202,47 +305,47 @@ struct tunnel_parsing_flags {
struct eth_fast_path_rx_reg_cqe {
u8 type /* CQE type */;
u8 bitfields;
+/* Type of calculated RSS hash (use enum rss_hash_type) */
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
+/* Traffic Class */
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
__le16 pkt_len /* Total packet length (from the parser) */;
- struct parsing_and_err_flags pars_flags
- /* Parsing and error flags from the parser */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags pars_flags;
__le16 vlan_tag /* 802.1q VLAN tag */;
__le32 rss_hash /* RSS hash result */;
__le16 len_on_first_bd /* Number of bytes placed on first BD */;
u8 placement_offset /* Offset of placement from BD start */;
- struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
- ;
+/* Tunnel Parsing Flags */
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num /* Number of BDs, used for packet */;
- u8 reserved[7];
+ u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
u8 reserved1[3];
- u8 flags;
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F
-#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
+
/*
* TPA-continue ETH Rx FP CQE.
*/
struct eth_fast_path_rx_tpa_cont_cqe {
u8 type /* CQE type */;
u8 tpa_agg_index /* TPA aggregation index */;
- __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]
- /* List of the segment sizes */;
- u8 reserved[5];
+/* List of the segment sizes */
+ __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE];
+ u8 reserved;
u8 reserved1 /* FW reserved. */;
__le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
+ u8 reserved3[3];
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
+
/*
* TPA-end ETH Rx FP CQE .
*/
@@ -251,64 +354,72 @@ struct eth_fast_path_rx_tpa_end_cqe {
u8 tpa_agg_index /* TPA aggregation index */;
__le16 total_packet_len /* Total aggregated packet length */;
u8 num_of_bds /* Total number of BDs comprising the packet */;
- u8 end_reason /* Aggregation end reason. Use enum eth_tpa_end_reason */
- ;
+/* Aggregation end reason. Use enum eth_tpa_end_reason */
+ u8 end_reason;
__le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
__le32 ts_delta /* TCP timestamp delta */;
- __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]
- /* List of the segment sizes */;
- u8 reserved1[3];
- u8 reserved2 /* FW reserved. */;
+/* List of the segment sizes */
+ __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE];
__le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
+ __le16 reserved1;
+ u8 reserved2 /* FW reserved. */;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
+
/*
* TPA-start ETH Rx FP CQE.
*/
struct eth_fast_path_rx_tpa_start_cqe {
u8 type /* CQE type */;
u8 bitfields;
+/* Type of calculated RSS hash (use enum rss_hash_type) */
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
+/* Traffic Class */
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len /* Segment length (packetLen from the parser) */;
- struct parsing_and_err_flags pars_flags
- /* Parsing and error flags from the parser */;
+/* Parsing and error flags from the parser */
+ struct parsing_and_err_flags pars_flags;
__le16 vlan_tag /* 802.1q VLAN tag */;
__le32 rss_hash /* RSS hash result */;
__le16 len_on_first_bd /* Number of bytes placed on first BD */;
u8 placement_offset /* Offset of placement from BD start */;
- struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */
- ;
+/* Tunnel Parsing Flags */
+ struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index /* TPA aggregation index */;
u8 header_len /* Packet L2+L3+L4 header length */;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]
- /* Additional BDs length list. */;
+/* Additional BDs length list. */
+ __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
+ u8 reserved;
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
+
/*
* The L4 pseudo checksum mode for Ethernet
*/
enum eth_l4_pseudo_checksum_mode {
- ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH
- /* Pseudo Header checksum on packet is calculated
- * with the correct packet length field.
- */
- ,
- ETH_L4_PSEUDO_CSUM_ZERO_LENGTH
- /* Pseudo Hdr checksum on packet is calc with zero len field. */
- ,
+/* Pseudo Header checksum on packet is calculated with the correct packet length
+ * field.
+ */
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+/* Pseudo Header checksum on packet is calculated with zero length field. */
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH,
MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
};
+
+
struct eth_rx_bd {
struct regpair addr /* single continues buffer */;
};
+
/*
* regular ETH Rx SP CQE
*/
@@ -319,29 +430,25 @@ struct eth_slow_path_rx_cqe {
u8 reserved[25];
__le16 echo;
u8 reserved1;
- u8 flags;
-#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1
-#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F
-#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2
+ struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* union for all ETH Rx CQE types
*/
union eth_rx_cqe {
- struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */;
- struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start
- /* TPA-start CQE */;
- struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont
- /* TPA-continue CQE */;
- struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end /* TPA-end CQE */
- ;
+/* Regular FP CQE */
+ struct eth_fast_path_rx_reg_cqe fast_path_regular;
+/* TPA-start CQE */
+ struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
+/* TPA-continue CQE */
+ struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont;
+/* TPA-end CQE */
+ struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end;
struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
};
+
/*
* ETH Rx CQE type
*/
@@ -355,51 +462,59 @@ enum eth_rx_cqe_type {
MAX_ETH_RX_CQE_TYPE
};
+
/*
- * Wrapp for PD RX CQE used in order to cover full cache line when writing CQE
+ * Wrapper for PD RX CQE - used in order to cover full cache line when writing
+ * CQE
*/
struct eth_rx_pmd_cqe {
union eth_rx_cqe cqe /* CQE data itself */;
u8 reserved[ETH_RX_CQE_GAP];
};
+
/*
- * ETH Rx producers data
+ * Eth RX Tunnel Type
*/
-struct eth_rx_prod_data {
- __le16 bd_prod /* BD producer */;
- __le16 cqe_prod /* CQE producer */;
- __le16 reserved;
- __le16 reserved1 /* FW reserved. */;
+enum eth_rx_tunn_type {
+ ETH_RX_NO_TUNN /* No Tunnel. */,
+ ETH_RX_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_RX_TUNN_GRE /* GRE Tunnel. */,
+ ETH_RX_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_RX_TUNN_TYPE
};
+
+
/*
* Aggregation end reason.
*/
enum eth_tpa_end_reason {
ETH_AGG_END_UNUSED,
ETH_AGG_END_SP_UPDATE /* SP configuration update */,
- ETH_AGG_END_MAX_LEN
- /* Maximum aggregation length or maximum buffer number used. */,
- ETH_AGG_END_LAST_SEG
- /* TCP PSH flag or TCP payload length below continue threshold. */,
+/* Maximum aggregation length or maximum buffer number used. */
+ ETH_AGG_END_MAX_LEN,
+/* TCP PSH flag or TCP payload length below continue threshold. */
+ ETH_AGG_END_LAST_SEG,
ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
+/* Packet header not consistent: different IPv4 TOS, TTL or flags, IPv6 TC,
+ * Hop limit or Flow label, TCP header length or TS options. In GRO, different
+ * TS value, SMAC, DMAC, ackNum, windowSize or VLAN.
+ */
ETH_AGG_END_NOT_CONSISTENT,
+/* Out of order or retransmission packet: sequence, ack or timestamp not
+ * consistent with previous segment.
+ */
ETH_AGG_END_OUT_OF_ORDER,
+/* Next segment can't be aggregated due to LLC/SNAP, IP error, IP fragment, IPv4
+ * options, IPv6 extension, IP ECN = CE, TCP errors, TCP options, zero TCP
+ * payload length, TCP flags or unsupported tunnel header options.
+ */
ETH_AGG_END_NON_TPA_SEG,
MAX_ETH_TPA_END_REASON
};
-/*
- * Eth Tunnel Type
- */
-enum eth_tunn_type {
- ETH_TUNN_GENEVE /* GENEVE Tunnel. */,
- ETH_TUNN_TTAG /* T-Tag Tunnel. */,
- ETH_TUNN_GRE /* GRE Tunnel. */,
- ETH_TUNN_VXLAN /* VXLAN Tunnel. */,
- MAX_ETH_TUNN_TYPE
-};
+
/*
* The first tx bd of a given packet
@@ -410,6 +525,8 @@ struct eth_tx_1st_bd {
struct eth_tx_data_1st_bd data /* Parsing information data. */;
};
+
+
/*
* The second tx bd of a given packet
*/
@@ -419,21 +536,29 @@ struct eth_tx_2nd_bd {
struct eth_tx_data_2nd_bd data /* Parsing information data. */;
};
+
/*
* The parsing information data for the third tx bd of a given packet.
*/
struct eth_tx_data_3rd_bd {
__le16 lso_mss /* For LSO packet - the MSS in bytes. */;
__le16 bitfields;
+/* For LSO with inner/outer IPv6+ext - TCP header length (in 4-byte WORDs) */
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
+/* LSO - number of BDs which contain headers. value should be in range
+ * (1..ETH_TX_MAX_LSO_HDR_NBD).
+ */
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
+/* Should be 0 in all the BDs, except the first one. (for debug) */
#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
+/* For tunnel with IPv6+ext - Pointer to tunnel L4 Header (in 2-byte WORDs) */
u8 tunn_l4_hdr_start_offset_w;
+/* For tunnel with IPv6+ext - Total size of Tunnel Header (in 2-byte WORDs) */
u8 tunn_hdr_size_w;
};
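
The third-BD parsing fields above are plain mask/shift bitfields. As a minimal, illustrative sketch (not driver code: only the ETH_TX_DATA_3RD_BD_* masks and shifts are taken from this header, while the helper name and argument choices are assumptions), composing the bitfields for an LSO packet could look like this:

    #include <stdint.h>

    static inline uint16_t fill_3rd_bd_bitfields(uint16_t tcp_hdr_len_dw,
                                                 uint16_t hdr_nbd)
    {
            uint16_t bf = 0;

            /* TCP header length in 4-byte words (inner/outer IPv6+ext case) */
            bf |= (tcp_hdr_len_dw & ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
                  ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT;
            /* Number of BDs that contain headers (1..ETH_TX_MAX_LSO_HDR_NBD) */
            bf |= (hdr_nbd & ETH_TX_DATA_3RD_BD_HDR_NBD_MASK) <<
                  ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT;
            /* A driver would store this as a little-endian value in data.bitfields */
            return bf;
    }
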
@@ -446,6 +571,7 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data /* Parsing information data. */;
};
+
/*
* Complementary information for the regular tx bd of a given packet.
*/
@@ -454,6 +580,7 @@ struct eth_tx_data_bd {
__le16 bitfields;
#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
+/* Should be 0 in all the BDs, except the first one. (for debug) */
#define ETH_TX_DATA_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
@@ -470,55 +597,61 @@ struct eth_tx_bd {
struct eth_tx_data_bd data /* Complementary information. */;
};
+
union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
- struct eth_tx_2nd_bd second_bd /* The second tx bd of a given packet */
- ;
+/* The second tx bd of a given packet */
+ struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
struct eth_tx_bd reg_bd /* The common non-special bd */;
};
-/*
- * Mstorm Queue Zone
- */
-struct mstorm_eth_queue_zone {
- struct eth_rx_prod_data rx_producers;
- __le32 reserved[2];
-};
+
+
+
+
/*
- * Ustorm Queue Zone
+ * Eth Tx Tunnel Type
*/
-struct ustorm_eth_queue_zone {
- struct coalescing_timeset int_coalescing_timeset
- /* Rx interrupt coalescing TimeSet */;
- __le16 reserved[3];
+enum eth_tx_tunn_type {
+ ETH_TX_TUNN_GENEVE /* GENEVE Tunnel. */,
+ ETH_TX_TUNN_TTAG /* T-Tag Tunnel. */,
+ ETH_TX_TUNN_GRE /* GRE Tunnel. */,
+ ETH_TX_TUNN_VXLAN /* VXLAN Tunnel. */,
+ MAX_ETH_TX_TUNN_TYPE
};
+
/*
* Ystorm Queue Zone
*/
-struct ystorm_eth_queue_zone {
- struct coalescing_timeset int_coalescing_timeset
- /* Tx interrupt coalescing TimeSet */;
- __le16 reserved[3];
+struct xstorm_eth_queue_zone {
+/* Tx interrupt coalescing TimeSet */
+ struct coalescing_timeset int_coalescing_timeset;
+ u8 reserved[7];
};
+
/*
* ETH doorbell data
*/
struct eth_db_data {
u8 params;
+/* destination of doorbell (use enum db_dest) */
#define ETH_DB_DATA_DEST_MASK 0x3
#define ETH_DB_DATA_DEST_SHIFT 0
+/* aggregative command to CM (use enum db_agg_cmd_sel) */
#define ETH_DB_DATA_AGG_CMD_MASK 0x3
#define ETH_DB_DATA_AGG_CMD_SHIFT 2
-#define ETH_DB_DATA_BYPASS_EN_MASK 0x1
+#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
#define ETH_DB_DATA_RESERVED_MASK 0x1
#define ETH_DB_DATA_RESERVED_SHIFT 5
+/* aggregative value selection */
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
+/* bit for every DQ counter flags in CM context that DQ can increment */
u8 agg_flags;
__le16 bd_prod;
};
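
For reference, a hedged sketch of how the params byte above could be packed. Only the ETH_DB_DATA_* masks and shifts come from this header; the helper and its argument names are hypothetical, and the db_dest / db_agg_cmd_sel enums mentioned in the comments are defined elsewhere in the HSI:

    #include <stdint.h>

    static inline uint8_t build_eth_db_params(uint8_t dest, uint8_t agg_cmd,
                                              uint8_t bypass_en, uint8_t agg_val_sel)
    {
            uint8_t p = 0;

            p |= (dest & ETH_DB_DATA_DEST_MASK) << ETH_DB_DATA_DEST_SHIFT;
            p |= (agg_cmd & ETH_DB_DATA_AGG_CMD_MASK) << ETH_DB_DATA_AGG_CMD_SHIFT;
            p |= (bypass_en & ETH_DB_DATA_BYPASS_EN_MASK) << ETH_DB_DATA_BYPASS_EN_SHIFT;
            p |= (agg_val_sel & ETH_DB_DATA_AGG_VAL_SEL_MASK) << ETH_DB_DATA_AGG_VAL_SEL_SHIFT;
            return p;
    }
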
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 71922654..96efc3c8 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -26,7 +26,7 @@
#define MCP_GLOB_PORT_MAX 4 /* Global */
#define MCP_GLOB_FUNC_MAX 16 /* Global */
-typedef u32 offsize_t; /* In DWORDS !!! */
+typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
#define OFFSIZE_OFFSET_SHIFT 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
@@ -35,48 +35,62 @@ typedef u32 offsize_t; /* In DWORDS !!! */
#define OFFSIZE_SIZE_MASK 0xffff0000
/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
-#define SECTION_OFFSET(_offsize) \
-((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+#define SECTION_OFFSET(_offsize) \
+ ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
/* SECTION_SIZE is calculating the size in bytes out of offsize */
-#define SECTION_SIZE(_offsize) \
-(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+#define SECTION_SIZE(_offsize) \
+ (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
-#define SECTION_ADDR(_offsize, idx) \
-(MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
+ * within section
+ */
+#define SECTION_ADDR(_offsize, idx) \
+ (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx))
+/* SECTION_OFFSIZE_ADDR returns the GRC addr of the offsize address. Use
+ * offsetof, since the OFFSETUP macro collides with the firmware definition
+ */
#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
-(_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-
+ (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
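
Assuming only the OFFSIZE_* definitions above, the SECTION_* macros can be restated as plain functions to make the DWORD-to-byte conversion explicit. This is an illustrative sketch only; MCP_REG_SCRATCH comes from the register file, so the scratchpad base is passed in as a parameter here:

    #include <stdint.h>

    static inline uint32_t section_offset_bytes(uint32_t offsize)
    {
            /* offset field is stored in DWORDs, so multiply by 4 */
            return ((offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2;
    }

    static inline uint32_t section_size_bytes(uint32_t offsize)
    {
            return ((offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2;
    }

    /* GRC address of entry 'idx' within the section, mirroring SECTION_ADDR() */
    static inline uint32_t section_addr(uint32_t scratch_base, uint32_t offsize,
                                        uint32_t idx)
    {
            return scratch_base + section_offset_bytes(offsize) +
                   section_size_bytes(offsize) * idx;
    }
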
/* PHY configuration */
-struct pmm_phy_cfg {
- u32 speed; /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
-#define PMM_SPEED_AUTONEG 0
-#define PMM_SPEED_SMARTLINQ 0x8
-
- u32 pause; /* bitmask */
-#define PMM_PAUSE_NONE 0x0
-#define PMM_PAUSE_AUTONEG 0x1
-#define PMM_PAUSE_RX 0x2
-#define PMM_PAUSE_TX 0x4
-
- u32 adv_speed; /* Default should be the speed_cap_mask */
+struct eth_phy_cfg {
+/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause; /* bitmask */
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed; /* Default should be the speed_cap_mask */
u32 loopback_mode;
-#define PMM_LOOPBACK_NONE 0
-#define PMM_LOOPBACK_INT_PHY 1
-#define PMM_LOOPBACK_EXT_PHY 2
-#define PMM_LOOPBACK_EXT 3
-#define PMM_LOOPBACK_MAC 4
-#define PMM_LOOPBACK_CNIG_AH_ONLY_0123 5 /* Port to itself */
-#define PMM_LOOPBACK_CNIG_AH_ONLY_2301 6 /* Port to Port */
+#define ETH_LOOPBACK_NONE (0)
+/* Serdes loopback. In AH, it refers to Near End */
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2) /* External PHY Loopback */
+/* External Loopback (Require loopback plug) */
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4) /* MAC Loopback - not supported */
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 (5) /* Port to itself */
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 (6) /* Port to Port */
+#define ETH_LOOPBACK_PCS_AH_ONLY (7) /* PCS loopback (TX to RX) */
+/* Loop RX packet from PCS to TX */
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY (8)
+/* Remote Serdes Loopback (RX to TX) */
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
/* features */
u32 feature_config_flags;
-
+#define ETH_EEE_MODE_ADV_LPI (1 << 0)
};
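
A minimal sketch of filling this structure, assuming a request for autonegotiation with RX/TX pause and no loopback. The helper is hypothetical; only the field names and ETH_* values are taken from the struct above, and adv_speed is left for the caller since the comment says it should default to the speed_cap_mask:

    #include <string.h>

    static void fill_default_phy_cfg(struct eth_phy_cfg *cfg)
    {
            memset(cfg, 0, sizeof(*cfg));
            cfg->speed = ETH_SPEED_AUTONEG;                        /* 0 = autoneg */
            cfg->pause = ETH_PAUSE_AUTONEG | ETH_PAUSE_RX | ETH_PAUSE_TX;
            cfg->adv_speed = 0;                  /* caller fills from speed_cap_mask */
            cfg->loopback_mode = ETH_LOOPBACK_NONE;
            cfg->feature_config_flags = 0;       /* EEE LPI advertisement disabled */
    }
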
struct port_mf_cfg {
- u32 dynamic_cfg; /* device control channel */
+ u32 dynamic_cfg; /* device control channel */
#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
#define PORT_MF_CFG_OV_TAG_SHIFT 0
#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
@@ -87,52 +101,66 @@ struct port_mf_cfg {
/* DO NOT add new fields in the middle
* MUST be synced with struct pmm_stats_map
*/
-struct pmm_stats {
- u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter */
- u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter */
- u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter */
- u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter */
- u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter */
- u64 r1518; /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
- u64 r1522; /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged */
- u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter */
- u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter */
- u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter */
- u64 r16383; /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame ctr */
- u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter */
- u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter */
- u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter */
- u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter */
- u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter */
- u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
- u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter */
- u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
- u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
- u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
- u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
+struct eth_stats {
+ u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
+ u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
+ u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter*/
+ u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter*/
+ u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
+/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
+ u64 r1518;
+/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
+ u64 r1522;
+ u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+ u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+ u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
+ u64 r16383;
+ u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
+ u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
+ u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
+ u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
+ u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
+ u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
+ u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
+ u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
+ u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
+ u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
+ u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */
- u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter */
- u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter */
- u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter */
- u64 t1518; /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
- u64 t2047; /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
- u64 t4095; /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
- u64 t9216; /* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
- u64 t16383; /* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame ctr */
- u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
- u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
- u64 tlpiec; /* 0x6C (Offset 0x108) Transmit Logical Type LLFC */
+ u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter*/
+ u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter*/
+ u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
+/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
+ u64 t1518;
+/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
+ u64 t2047;
+/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
+ u64 t4095;
+/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
+ u64 t9216;
+/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
+ u64 t16383;
+ u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
+ u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ u64 tlpiec;
u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
- u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
- u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
- u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
- u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
- u64 rxpok; /* 0x22 (Offset 0x138) RX good frame */
- u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
- u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
- u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
- u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
- u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+ u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
+ u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
+ u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
+ u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
+/* 0x22 (Offset 0x138) RX good frame (good CRC, not oversized, no ERROR) */
+ u64 rxpok;
+ u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
+ u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
+ u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
+ u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
+ u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+/* HSI - Cannot add more stats to this struct. If needed, a new struct must be
+ * opened
+ */
+
};
struct brb_stats {
@@ -142,28 +170,29 @@ struct brb_stats {
struct port_stats {
struct brb_stats brb;
- struct pmm_stats pmm;
+ struct eth_stats eth;
};
-/*-----+-----------------------------------------------------------------------
- * Chip | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines
- * | rate of physical | team #1 | team #2 |are used|per path | (paths)
- * | ports | | | | |
- *======+==================+=========+=========+========+======================
- * BB | 1x100G | This is special mode, where there are 2 HW func
- * BB | 2x10/20Gbps | 0,1 | NA | No | 1 | 1
- * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1
- * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1
- * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2
- * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2
- * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2
- * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1
- * AH | 2x10/20Gbps | 0,1 | NA | NA | 1 | NA
- * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA
- * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA
- * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA
- * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA
- *======+==================+=========+=========+========+=======================
+/*----+------------------------------------------------------------------------
+ * C | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines
+ * h | rate of | team #1 | team #2 |are used|per path | (paths)
+ * i | physical | | | | | enabled
+ * p | ports | | | | |
+ *====+============+=========+=========+========+==========+===================
+ * BB | 1x100G | This is special mode, where there are actually 2 HW func
+ * BB | 2x10/20Gbps| 0,1 | NA | No | 1 | 1
+ * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1
+ * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1
+ * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2 (2 is optional)
+ * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1
+ * AH | 2x10/20Gbps| 0,1 | NA | NA | 1 | NA
+ * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA
+ * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA
+ *====+============+=========+=========+========+==========+===================
*/
#define CMT_TEAM0 0
@@ -216,19 +245,16 @@ struct lldp_config_params_s {
u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
/* Holds local Port ID TLV header, subtype and 9B of payload.
* If first byte is 0, then we will use default port ID
- */
+ */
u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
};
struct lldp_status_params_s {
u32 prefix_seq_num;
- u32 status; /* TBD */
- /* Holds remote Chassis ID TLV header, subtype and 9B of payload.
- */
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 status; /* TBD */
+ /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- /* Holds remote Port ID TLV header, subtype and 9B of payload.
- */
+ /* Holds remote Port ID TLV header, subtype and 9B of payload. */
u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
u32 suffix_seq_num;
};
@@ -245,11 +271,27 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
- u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
+#define DCBX_ISCSI_OOO_TC_SHIFT 8
+/* Entries in the tc table are organized such that the leftmost is priority 0
+ * and the rightmost is priority 7
+ */
+
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC (4)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
-#define DCBX_CEE_STRICT_PRIORITY_TC 0x7
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
+/* Entries in the tc table are organized such that the leftmost is priority 0
+ * and the rightmost is priority 7
+ */
+
+ u32 tc_bw_tbl[2];
+/* Entries in the tc table are organized such that the leftmost is priority 0
+ * and the rightmost is priority 7
+ */
+
+ u32 tc_tsa_tbl[2];
#define DCBX_ETS_TSA_STRICT 0
#define DCBX_ETS_TSA_CBS 1
#define DCBX_ETS_TSA_ETS 2
@@ -271,10 +313,19 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16
};
+
/* FW structure in BE */
struct dcbx_app_priority_feature {
u32 flags;
@@ -285,14 +336,14 @@ struct dcbx_app_priority_feature {
#define DCBX_APP_ERROR_MASK 0x00000004
#define DCBX_APP_ERROR_SHIFT 2
/* Not in use
- * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
- */
+ #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
+ #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ */
#define DCBX_APP_MAX_TCS_MASK 0x0000f000
#define DCBX_APP_MAX_TCS_SHIFT 12
#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
#define DCBX_APP_NUM_ENTRIES_SHIFT 16
- struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
/* FW structure in BE */
@@ -331,11 +382,12 @@ struct dcbx_features {
struct dcbx_local_params {
u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000003
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
#define DCBX_CONFIG_VERSION_SHIFT 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
u32 flags;
struct dcbx_features features;
@@ -345,12 +397,13 @@ struct dcbx_mib {
u32 prefix_seq_num;
u32 flags;
/*
- * #define DCBX_CONFIG_VERSION_MASK 0x00000003
- * #define DCBX_CONFIG_VERSION_SHIFT 0
- * #define DCBX_CONFIG_VERSION_DISABLED 0
- * #define DCBX_CONFIG_VERSION_IEEE 1
- * #define DCBX_CONFIG_VERSION_CEE 2
- */
+ #define DCBX_CONFIG_VERSION_MASK 0x00000007
+ #define DCBX_CONFIG_VERSION_SHIFT 0
+ #define DCBX_CONFIG_VERSION_DISABLED 0
+ #define DCBX_CONFIG_VERSION_IEEE 1
+ #define DCBX_CONFIG_VERSION_CEE 2
+ #define DCBX_CONFIG_VERSION_STATIC 4
+ */
struct dcbx_features features;
u32 suffix_seq_num;
};
@@ -361,25 +414,39 @@ struct lldp_system_tlvs_buffer_s {
u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
+
/**************************************/
/* */
/* P U B L I C G L O B A L */
/* */
/**************************************/
struct public_global {
- u32 max_path; /* 32bit is wasty, but this will be used often */
- u32 max_ports; /* (Global) 32bit is wasty, this will be used often */
-#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */
+ u32 max_path; /* 32bit is wasteful, but this will be used often */
+/* (Global) 32bit is wasteful, but this will be used often */
+ u32 max_ports;
+#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */
#define MODE_2P 2
#define MODE_3P 3
#define MODE_4P 4
u32 debug_mb_offset;
u32 phymod_dbg_mb_offset;
struct couple_mode_teaming cmt;
+/* Temperature in Celsius (-255C / +255C), measured every second. */
s32 internal_temperature;
u32 mfw_ver;
u32 running_bundle_id;
s32 external_temperature;
+ u32 mdump_reason;
+#define MDUMP_REASON_INTERNAL_ERROR (1 << 0)
+#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1)
+#define MDUMP_REASON_DUMP_AGED (1 << 2)
};
/**************************************/
@@ -406,7 +473,7 @@ struct public_global {
struct fw_flr_mb {
u32 aggint;
u32 opgen_addr;
- u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
+ u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
#define ACCUM_ACK_PF_BASE 0
#define ACCUM_ACK_PF_SHIFT 0
@@ -424,10 +491,10 @@ struct public_path {
* mcp_vf_disabled is set by the MCP to indicate the driver about VFs
* which were disabled/flred
*/
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */
+/* Reset on mcp reset, and incremented for every process kill event. */
u32 process_kill;
- /* Reset on mcp reset, and incremented for eveny process kill event. */
#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
#define PROCESS_KILL_COUNTER_SHIFT 0
#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
@@ -464,7 +531,7 @@ struct dci_fc_npiv_tbl {
****************************************************************************/
struct public_port {
- u32 validity_map; /* 0x0 (4*2 = 0x8) */
+ u32 validity_map; /* 0x0 (4*2 = 0x8) */
/* validity bits */
#define MCP_VALIDITY_PCI_CFG 0x00100000
@@ -473,7 +540,8 @@ struct public_port {
#define MCP_VALIDITY_RESERVED 0x00000007
/* One licensing bit should be set */
-#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 /* yaniv - tbd */
+/* yaniv - tbd ? license */
+#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
@@ -485,7 +553,7 @@ struct public_port {
#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_LINK_UP 0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
@@ -501,7 +569,7 @@ struct public_port {
#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_PFC_ENABLED 0x00000100
#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
@@ -525,9 +593,15 @@ struct public_port {
#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+
u32 link_status1;
u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr; /* Points to pmm_phy_cfg (For READ-ONLY) */
+/* Points to struct eth_phy_cfg (For READ-ONLY) */
+ u32 drv_phy_cfg_addr;
u32 port_stx;
@@ -537,15 +611,15 @@ struct public_port {
struct port_stats stats;
u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_MODULE_FIBER 0x6
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_XFP_FIBER 0x2 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5 /* Use MEDIA_MODULE_FIBER instead */
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
u32 lfa_status;
#define LFA_LINK_FLAP_REASON_OFFSET 0
@@ -565,6 +639,7 @@ struct public_port {
u32 link_change_count;
/* LLDP params */
+/* offset: 536 bytes? */
struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
@@ -574,60 +649,75 @@ struct public_port {
struct dcbx_mib remote_dcbx_mib;
struct dcbx_mib operational_dcbx_mib;
- /* FC_NPIV table offset & size in NVRAM value of 0 means not present */
+/* FC_NPIV table offset & size in NVRAM; a value of 0 means not present */
+
u32 fc_npiv_nvram_tbl_addr;
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
-#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
-#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define PMM_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define PMM_TRANSCEIVER_STATE_VALID 0x00000003
-#define PMM_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define PMM_TRANSCEIVER_TYPE_MASK 0x0000FF00
-#define PMM_TRANSCEIVER_TYPE_SHIFT 0x00000008
-#define PMM_TRANSCEIVER_TYPE_NONE 0x00000000
-#define PMM_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
-#define PMM_TRANSCEIVER_TYPE_1G_PCC 0x01 /* 1G Passive copper cable */
-#define PMM_TRANSCEIVER_TYPE_1G_ACC 0x02 /* 1G Active copper cable */
-#define PMM_TRANSCEIVER_TYPE_1G_LX 0x03
-#define PMM_TRANSCEIVER_TYPE_1G_SX 0x04
-#define PMM_TRANSCEIVER_TYPE_10G_SR 0x05
-#define PMM_TRANSCEIVER_TYPE_10G_LR 0x06
-#define PMM_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define PMM_TRANSCEIVER_TYPE_10G_ER 0x08
-#define PMM_TRANSCEIVER_TYPE_10G_PCC 0x09 /* 10G Passive copper cable */
-#define PMM_TRANSCEIVER_TYPE_10G_ACC 0x0a /* 10G Active copper cable */
-#define PMM_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define PMM_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define PMM_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define PMM_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define PMM_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */
-#define PMM_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define PMM_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define PMM_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define PMM_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */
-#define PMM_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define PMM_TRANSCEIVER_TYPE_4x10G_SR 0x15
-#define PMM_TRANSCEIVER_TYPE_25G_PCC_S 0x16
-#define PMM_TRANSCEIVER_TYPE_25G_ACC_S 0x17
-#define PMM_TRANSCEIVER_TYPE_25G_PCC_M 0x18
-#define PMM_TRANSCEIVER_TYPE_25G_ACC_M 0x19
-#define PMM_TRANSCEIVER_TYPE_25G_PCC_L 0x1a
-#define PMM_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
-#define PMM_TRANSCEIVER_TYPE_25G_SR 0x1c
-#define PMM_TRANSCEIVER_TYPE_25G_LR 0x1d
-#define PMM_TRANSCEIVER_TYPE_25G_AOC 0x1e
-
-#define PMM_TRANSCEIVER_TYPE_4x10G 0x1d
-#define PMM_TRANSCEIVER_TYPE_4x25G_CR 0x1e
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40GR 0x30
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
-#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+/* 1G Passive copper cable */
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+/* 1G Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+/* 10G Passive copper cable */
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+/* 10G Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+/* 25G Passive copper cable - short */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+/* 25G Active copper cable - short */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+/* 25G Passive copper cable - medium */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+/* 25G Active copper cable - medium */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+/* 25G Passive copper cable - long */
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+/* 25G Active copper cable - long */
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
};
/**************************************/
@@ -637,11 +727,13 @@ struct public_port {
/**************************************/
struct public_func {
- u32 dpdk_rsvd1[2];
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
/* MTU size per function is needed for the OV feature */
u32 mtu_size;
- /* 9 entires for the C2S PCP map for each inner VLAN PCP + 1 default */
+/* 9 entries for the C2S PCP map for each inner VLAN PCP + 1 default */
+
/* For PCP values 0-3 use the map lower */
/* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1,
* 0x0000FF00 - PCP 2, 0x000000FF PCP 3
@@ -650,7 +742,7 @@ struct public_func {
/* For PCP values 4-7 use the map upper */
/* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5,
* 0x0000FF00 - PCP 6, 0x000000FF PCP 7
- */
+ */
u32 c2s_pcp_map_upper;
/* For PCP default value get the MSB byte of the map default */
@@ -666,10 +758,14 @@ struct public_func {
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
/* MINBW, MAXBW */
/* value range - 0..100, increments in 1 % */
@@ -683,23 +779,27 @@ struct public_func {
u32 status;
#define FUNC_STATUS_VLINK_DOWN 0x00000001
- u32 mac_upper; /* MAC */
+ u32 mac_upper; /* MAC */
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
u32 mac_lower;
#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
- u32 dpdk_rsvd2[4];
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
- u32 ovlan_stag; /* tags */
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag; /* tags */
#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
#define FUNC_MF_CFG_OV_STAG_SHIFT 0
#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
- u32 pf_allocation; /* vf per pf */
+ u32 pf_allocation; /* vf per pf */
- u32 preserve_data; /* Will be used bt CCM */
+ u32 preserve_data; /* Will be used by CCM */
u32 driver_last_activity_ts;
@@ -707,7 +807,7 @@ struct public_func {
* drv_ack_vf_disabled is set by the PF driver to ack handled disabled
* VFs
*/
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
u32 drv_id;
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
@@ -747,7 +847,7 @@ struct public_func {
*/
struct mcp_mac {
- u32 mac_upper; /* Upper 16 bits are always zeroes */
+ u32 mac_upper; /* Upper 16 bits are always zeroes */
u32 mac_lower;
};
@@ -761,6 +861,13 @@ struct mcp_file_att {
u32 len;
};
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type; /* Image type */
+ u32 nvm_start_addr; /* NVM address of the image */
+ u32 len; /* Include CRC */
+};
+
#define MCP_DRV_VER_STR_SIZE 16
#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
#define MCP_DRV_NVM_BUF_LEN 32
@@ -777,19 +884,106 @@ struct lan_stats_stc {
u32 rserved;
};
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
struct ocbb_data_stc {
u32 ocbb_host_addr;
u32 ocsd_host_addr;
u32 ocsd_req_update_interval;
};
+#define MAX_NUM_OF_SENSORS 7
+#define MFW_SENSOR_LOCATION_INTERNAL 1
+#define MFW_SENSOR_LOCATION_EXTERNAL 2
+#define MFW_SENSOR_LOCATION_SFP 3
+
+#define SENSOR_LOCATION_SHIFT 0
+#define SENSOR_LOCATION_MASK 0x000000ff
+#define THRESHOLD_HIGH_SHIFT 8
+#define THRESHOLD_HIGH_MASK 0x0000ff00
+#define CRITICAL_TEMPERATURE_SHIFT 16
+#define CRITICAL_TEMPERATURE_MASK 0x00ff0000
+#define CURRENT_TEMP_SHIFT 24
+#define CURRENT_TEMP_MASK 0xff000000
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
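
Each entry of sensor[] packs four byte-wide fields. A hedged decode sketch, using only the masks and shifts defined above (the helper and the print format are illustrative, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    static void print_sensor_word(uint32_t s)
    {
            unsigned loc  = (s & SENSOR_LOCATION_MASK) >> SENSOR_LOCATION_SHIFT;
            unsigned high = (s & THRESHOLD_HIGH_MASK) >> THRESHOLD_HIGH_SHIFT;
            unsigned crit = (s & CRITICAL_TEMPERATURE_MASK) >> CRITICAL_TEMPERATURE_SHIFT;
            unsigned cur  = (s & CURRENT_TEMP_MASK) >> CURRENT_TEMP_SHIFT;

            /* location 1/2/3 = internal / external / SFP per the defines above */
            printf("sensor: loc=%u high=%uC critical=%uC current=%uC\n",
                   loc, high, crit, cur);
    }
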
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+/* Not a real resource!! it's a factor used to calculate others */
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+/* Not a real resource!! it's a factor used to calculate others */
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT (1 << 0)
+};
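
Per the comment above, the driver fills only res_id in the DRV_MSG_GET_RESOURCE_ALLOC_MSG request and the MFW returns the rest in the response. An illustrative sketch; the helper name and the chosen resource are assumptions:

    static struct resource_info prepare_l2_queue_req(void)
    {
            struct resource_info req = {0};

            req.res_id = RESOURCE_NUM_L2_QUEUE_E; /* driver fills the resource ID only */
            /* size, offset, vf_size, vf_offset and flags are filled by the MFW in
             * the mailbox response; RESOURCE_ELEMENT_STRICT may then be set in flags.
             */
            return req;
    }
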
+
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
- struct mcp_mac wol_mac; /* UNLOAD_DONE */
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
+ struct mcp_mac wol_mac; /* UNLOAD_DONE */
+
+/* This configuration should be set by the driver for the LINK_SET command. */
- struct pmm_phy_cfg drv_phy_cfg;
+ struct eth_phy_cfg drv_phy_cfg;
- struct mcp_val64 val64; /* For PHY / AVS commands */
+ struct mcp_val64 val64; /* For PHY / AVS commands */
u8 raw_data[MCP_DRV_NVM_BUF_LEN];
@@ -800,9 +994,14 @@ union drv_union_data {
struct drv_version_stc drv_version;
struct lan_stats_stc lan_stats;
- u32 dpdk_rsvd[3];
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc icsci_stats;
+ struct rdma_stats_stc rdma_stats;
struct ocbb_data_stc ocbb_info;
-
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
/* ... */
};
@@ -822,7 +1021,7 @@ struct public_drv_mb {
/* Vitaly: LLDP commands */
#define DRV_MSG_CODE_SET_LLDP 0x24000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
- /* OneView feature driver HSI */
+ /* OneView feature driver HSI*/
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
@@ -833,49 +1032,184 @@ struct public_drv_mb {
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
+/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp,
+ * data: struct resource_info
+ */
+#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+
+/* deprecated, don't use */
+#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
+#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+/* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */
#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
+/* Param should be set to the transaction size (up to 64 bytes) */
#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
+/* MFW will place the file offset and len in file_att struct */
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
+/* Read 32 bytes of NVRAM data. Param is [0:23] - Offset, [24:31] -
+ * Len in Bytes
+ */
#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
+/* Writes up to 32 bytes to NVRAM. Param is [0:23] - Offset, [24:31] -
+ * Len in Bytes. If this address falls within the range of a secured file in
+ * secured mode, the operation will fail
+ */
#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
+/* Delete a file from nvram. Param is image_type. */
#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
+/* Reset MCP when no NVM operation is going on, and no drivers are loaded.
+ * If the operation succeeds, the MCP will not ack back.
+ */
#define DRV_MSG_CODE_MCP_RESET 0x00090000
+/* Temporary command to set secure mode, where the param is 0 (Non-secure) /
+ * 1 (Secure) / 2 (Full-Secure)
+ */
#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
+/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane,
+ * 4/5 - for dual lanes, 6 - for all lanes), [28] - PMD reg, [29] - select port,
+ * [30:31] - port
+ */
#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
+/* Param: [0:15] - Address, [16:18] - lane# (0/1/2/3 - for single lane,
+ * 4/5 - for dual lanes, 6 - for all lanes), [28] - PMD reg, [29] - select port,
+ * [30:31] - port
+ */
#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
+/* Param: [0:15] - Address, [30:31] - port */
#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
+/* Param: [0:15] - Address, [30:31] - port */
#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
+/* Param: [0:3] - version, [4:15] - name (null terminated) */
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+/* Halts the MCP. To resume MCP, user will need to use
+ * MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers.
+ */
#define DRV_MSG_CODE_MCP_HALT 0x00100000
+/* Host shall provide buffer and size for MFW */
#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+/* Host shall provide buffer and size for MFW */
#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
+/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
+ * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
+ */
#define DRV_MSG_CODE_SET_VMAC 0x00110000
+/* Get virtual mac address, params [31:6] - reserved, [5:4] - type,
+ * [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
+ */
#define DRV_MSG_CODE_GET_VMAC 0x00120000
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+/* indicate OCBB related information */
#define DRV_MSG_CODE_OCBB_DATA 0x00180000
+
+/* Set function BW, params[15:8] - min, params[7:0] - max */
#define DRV_MSG_CODE_SET_BW 0x00190000
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_SHIFT 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_SHIFT 8
+
+/* When param is set to 1, all parities will be masked (disabled). When param
+ * is set to 0, parities will be unmasked again.
+ */
#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
+/* param[0] - Simulate fan failure, param[1] - simulate over temp. */
#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
+/* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_GPIO_READ 0x001c0000
+/* Param: [0:15] - gpio number, [16:31] - gpio value */
#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_INFO 0x00270000
+
+/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
+#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000
+/* Set LED mode. Params: 0 - operational, 1 - LED turn ON, 2 - LED turn OFF */
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+/* drv_data[7:0] - epoch in seconds, drv_data[15:8] -
+ * driver version (MAJ MIN BUILD SUB)
+ */
+#define DRV_MSG_CODE_TIMESTAMP 0x00210000
+/* This is an empty mailbox; just return OK */
#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+/* Param[0:4] - resource number (0-31), Param[5:7] - opcode,
+ * param[15:8] - age
+ */
+#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+
+/* request resource ownership with default aging */
+#define RESOURCE_OPCODE_REQ 1
+/* request resource ownership without aging */
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+/* request resource ownership with specific aging timer (in seconds) */
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
+#define RESOURCE_OPCODE_FORCE_RELEASE 5 /* force resource release */
+
+/* resource is free and granted to requester */
+#define RESOURCE_OPCODE_GNT 1
+/* resource is busy, param[7:0] indicates the owner as follows: 0-15 = PF0-15,
+ * 16 = MFW, 17 = diag over serial
+ */
+#define RESOURCE_OPCODE_BUSY 2
+/* indicate release request was acknowledged */
+#define RESOURCE_OPCODE_RELEASED 3
+/* indicate release request was previously received by other owner */
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+/* indicate wrong owner during release */
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+/* dedicate resource 0 for dump */
+#define RESOURCE_DUMP (1 << 0)
+
+#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */
+
+/* Send crash dump commands with param[3:0] - opcode */
+#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
+/* acknowledge reception of error indication */
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+/* set epoch and personality as follows: drv_data[3:0] - epoch,
+ * drv_data[7:4] - personality
+ */
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+/* trigger crash dump procedure */
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+/* Request valid logs and config words */
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+/* Set triggers mask. drv_mb_param should indicate (bitwise) which triggers
+ * are enabled
+ */
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 /* Clear all logs */
+
+
+#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -893,7 +1227,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
- /* LLDP / DCBX params */
+ /* LLDP / DCBX params*/
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
@@ -925,7 +1259,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
- /* configure vf MSIX params */
+ /* configure vf MSIX params*/
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
@@ -943,16 +1277,20 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
-#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
-#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_FABRIC_LOGIN_SUCCESS (1 << 1)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND (1 << 2)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_CHAP_SUCCESS (1 << 3)
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_FCOE_LUN_FOUND (1 << 3)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT (1 << 4)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED (1 << 5)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
-#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
@@ -965,9 +1303,12 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+/* Not Installed*/
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+/* installed but disabled by user/admin/OS */
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+/* installed and active */
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
@@ -990,6 +1331,32 @@ struct public_drv_mb {
#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16
#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
+#define DRV_MB_PARAM_GPIO_DIRECTION_SHIFT 16
+#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000
+#define DRV_MB_PARAM_GPIO_CTRL_SHIFT 24
+#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000
+
+ /* Resource Allocation params - Driver version support*/
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
@@ -1017,6 +1384,10 @@ struct public_drv_mb {
#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0x31000000
#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000
#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE 0x33000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
+#define FW_MSG_CODE_RESOURCE_ALLOC_GEN_ERR 0x37000000
#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
@@ -1045,6 +1416,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
+/* MFW rejects the "mcp reset" command if one of the drivers is up */
#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_PHY_ERROR 0x00120000
@@ -1063,16 +1435,31 @@ struct public_drv_mb {
#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
#define FW_MSG_CODE_GPIO_OK 0x00160000
#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000
+
+/* mdump related response codes */
+#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000
+#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000
+#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
+#define FW_MSG_CODE_MDUMP_IN_PROGRESS 0x00040000
+#define FW_MSG_CODE_MDUMP_WRITE_FAILED 0x00050000
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
u32 fw_mb_param;
+ /* Resource Allocation params - MFW version support*/
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
@@ -1097,7 +1484,10 @@ struct public_drv_mb {
#define MCP_EVENT_MASK 0xffff0000
#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+/* The union data is used by the driver to pass parameters to the scratchpad. */
+
union drv_union_data union_data;
+
};
/* MFW - DRV MB */
@@ -1132,6 +1522,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_GET_RDMA_STATS,
MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_MAX
};
@@ -1152,8 +1543,10 @@ enum MFW_DRV_MSG_TYPE {
((u8)((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++;)
struct public_mfw_mb {
- u32 sup_msgs; /* Assigend with MFW_DRV_MSG_MAX */
+ u32 sup_msgs; /* Assigned with MFW_DRV_MSG_MAX */
+/* Incremented by the MFW */
u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+/* Incremented by the driver */
u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
};
@@ -1163,8 +1556,8 @@ struct public_mfw_mb {
/* */
/**************************************/
enum public_sections {
- PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
- PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
+ PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
PUBLIC_GLOBAL,
PUBLIC_PATH,
PUBLIC_PORT,
@@ -1202,4 +1595,4 @@ struct mcp_public_data {
#define MAX_I2C_TRANSACTION_SIZE 16
#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
-#endif /* MCP_PUBLIC_H */
+#endif /* MCP_PUBLIC_H */
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 7f1a60dd..8e9c08a7 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -13,7 +13,7 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
- * Created: 1/14/2016
+ * Created: 5/9/2016
*
****************************************************************************/
@@ -22,8 +22,8 @@
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ #define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+ #define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
u32 mac_addr_lo;
};
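
A small, hedged sketch of expanding this structure into a 6-byte MAC address. The helper is not part of the header, and the byte ordering shown (the hi word holding the two most significant bytes, per NVM_CFG_MAC_ADDRESS_HI_MASK) is an assumption for illustration:

    #include <stdint.h>

    static void nvm_mac_to_bytes(const struct nvm_cfg_mac_address *m, uint8_t mac[6])
    {
            uint32_t hi = m->mac_addr_hi & NVM_CFG_MAC_ADDRESS_HI_MASK;
            uint32_t lo = m->mac_addr_lo;

            /* two MSBs from the hi word, remaining four from the lo word */
            mac[0] = (hi >> 8) & 0xff;
            mac[1] = hi & 0xff;
            mac[2] = (lo >> 24) & 0xff;
            mac[3] = (lo >> 16) & 0xff;
            mac[4] = (lo >> 8) & 0xff;
            mac[5] = lo & 0xff;
    }
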
@@ -31,875 +31,1370 @@ struct nvm_cfg_mac_address {
* nvm_cfg1 structs
******************************************/
struct nvm_cfg1_glob {
- u32 generic_cont0; /* 0x0 */
-#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
-#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
-#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
-#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
-#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
-#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
-#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
- u32 engineering_change[3]; /* 0x4 */
- u32 manufacturing_id; /* 0x10 */
- u32 serial_number[4]; /* 0x14 */
- u32 pcie_cfg; /* 0x24 */
-#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
-#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
-#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK 0x00000020
-#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
- /* Set the duration, in seconds, fan failure signal should be
- * sampled
- */
-#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK 0x80000000
-#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31
- u32 mgmt_traffic; /* 0x28 */
-#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
-#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
-#define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000
-#define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27
-#define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0
-#define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1
+ u32 generic_cont0; /* 0x0 */
+ #define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
+ #define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
+ #define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
+ #define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
+ #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+ #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+ #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+ #define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+ #define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+ #define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+ #define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+ #define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
+ #define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
+ #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
+ #define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
+ #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
+ #define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
+ #define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
+ #define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
+ #define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
+ #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
+ #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
+ #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
+ #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+ u32 engineering_change[3]; /* 0x4 */
+ u32 manufacturing_id; /* 0x10 */
+ u32 serial_number[4]; /* 0x14 */
+ u32 pcie_cfg; /* 0x24 */
+ #define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
+ #define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
+ #define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
+ #define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
+ #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK \
+ 0x00000020
+ #define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5
+ #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
+ #define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
+ #define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
+ #define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
+ /* Set the duration, in sec, fan failure signal should be sampled */
+ #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31
+ u32 mgmt_traffic; /* 0x28 */
+ #define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
+ #define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
+ #define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
+ #define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
+ #define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
+ #define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000
+ #define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27
+ #define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0
+ #define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1
 /* Indicates whether external thermal sensor is available */
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1
- u32 core_cfg; /* 0x2C */
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
-#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
-#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
-#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
-#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1
-#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2
-#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
- u32 e_lane_cfg1; /* 0x30 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
- u32 e_lane_cfg2; /* 0x34 */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
-#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
-#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
-#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
-#define NVM_CFG1_GLOB_NCSI_OFFSET 12
-#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
-#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1
+ u32 core_cfg; /* 0x2C */
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+ #define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_MASK 0x00000200
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_OFFSET 9
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_DISABLED 0x0
+ #define NVM_CFG1_GLOB_MPS25_ENFORCE_TX_FIR_CFG_ENABLED 0x1
+ #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_MASK 0x0003FC00
+ #define NVM_CFG1_GLOB_MPS10_CORE_ADDR_OFFSET 10
+ #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MPS25_CORE_ADDR_OFFSET 18
+ #define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
+ #define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
+ #define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
+ #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1
+ #define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2
+ #define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
+ u32 e_lane_cfg1; /* 0x30 */
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 e_lane_cfg2; /* 0x34 */
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ #define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
+ #define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
+ #define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
+ #define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_NCSI_OFFSET 12
+ #define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
+ #define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
/* Maximum advertised pcie link width */
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_16_LANES 0x0
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3
-#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_BB_16_LANES 0x0
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3
+ #define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4
/* ASPM L1 mode */
-#define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000
-#define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20
-#define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0
-#define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1
-#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000
-#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22
-#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
-#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
-#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK 0x06000000
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2
-#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0
+ #define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2
+ #define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK \
+ 0x06000000
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2
+ #define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3
/* Set the PLDM sensor modes */
-#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000
-#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27
-#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
-#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
-#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
- u32 f_lane_cfg1; /* 0x38 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
- u32 f_lane_cfg2; /* 0x3C */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
+ #define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ u32 f_lane_cfg1; /* 0x38 */
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
+ #define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
+ #define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
+ #define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
+ #define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
+ u32 f_lane_cfg2; /* 0x3C */
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
+ #define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
+ #define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
+ #define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
+ #define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
+ #define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
+ #define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
+ #define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
+ #define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
/* Control the period between two successive checks */
-#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
+ #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK \
+ 0x0000FF00
+ #define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8
/* Set shutdown temperature */
-#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
+ #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16
/* Set max. count for over operational temperature */
-#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
-#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24
- u32 eagle_preemphasis; /* 0x40 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
- u32 eagle_driver_current; /* 0x44 */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
- u32 falcon_preemphasis; /* 0x48 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
- u32 falcon_driver_current; /* 0x4C */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
- u32 pci_id; /* 0x50 */
-#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
+ #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24
+ u32 mps10_preemphasis; /* 0x40 */
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 mps10_driver_current; /* 0x44 */
+ #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 mps25_preemphasis; /* 0x48 */
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
+ u32 mps25_driver_current; /* 0x4C */
+ #define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
+ u32 pci_id; /* 0x50 */
+ #define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
/* Set caution temperature */
-#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
+ #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
/* Set external thermal sensor I2C address */
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK 0xFF000000
-#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
- u32 pci_subsys_id; /* 0x54 */
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
- u32 bar; /* 0x58 */
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
-#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
-#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
-#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
-#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
-#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
-#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
-#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
-#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
-#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
-#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
-#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
-#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
-#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
-#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
-#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
-#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
- /* Set the duration, in seconds, fan failure signal should be
- * sampled
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \
+ 0xFF000000
+ #define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24
+ u32 pci_subsys_id; /* 0x54 */
+ #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
+ #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
+ #define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
+ u32 bar; /* 0x58 */
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
+ #define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
+ /* BB VF BAR2 size */
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
+ #define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
+ /* BB BAR2 size (global) */
+ #define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
+ #define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
+ #define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
+ #define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
+ #define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
+ #define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
+ #define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
+ #define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
+ #define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
+ #define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
+ #define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
+ #define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
+ #define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
+ #define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
+ #define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
+ #define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
+ /* Set the duration, in secs, fan failure signal should be sampled */
+ #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12
+	/* This field defines the board total budget for bar2; when disabled,
+	 *  the regular bar size is used.
*/
-#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000
-#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12
- u32 eagle_txfir_main; /* 0x5C */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
- u32 eagle_txfir_post; /* 0x60 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
- u32 falcon_txfir_main; /* 0x64 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
- u32 falcon_txfir_post; /* 0x68 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
- u32 manufacture_ver; /* 0x6C */
-#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
-#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
-#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
-#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
-#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
- u32 manufacture_time; /* 0x70 */
-#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
- u32 led_global_settings; /* 0x74 */
-#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
-#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
-#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
-#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
-#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
-#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
-#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
-#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
- u32 generic_cont1; /* 0x78 */
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
- u32 mbi_version; /* 0x7C */
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
- u32 mbi_date; /* 0x80 */
- u32 misc_sig; /* 0x84 */
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_OFFSET 16
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_DISABLED 0x0
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64K 0x1
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128K 0x2
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256K 0x3
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512K 0x4
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1M 0x5
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_2M 0x6
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_4M 0x7
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_8M 0x8
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_16M 0x9
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_32M 0xA
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_64M 0xB
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_128M 0xC
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_256M 0xD
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_512M 0xE
+ #define NVM_CFG1_GLOB_BAR2_TOTAL_BUDGET_1G 0xF
+ /* Enable/Disable Crash dump triggers */
+ #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_CRASH_DUMP_TRIGGER_ENABLE_OFFSET 24
+ u32 mps10_txfir_main; /* 0x5C */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 mps10_txfir_post; /* 0x60 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 mps25_txfir_main; /* 0x64 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
+ u32 mps25_txfir_post; /* 0x68 */
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
+ u32 manufacture_ver; /* 0x6C */
+ #define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
+ #define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
+ #define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
+ #define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
+ #define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
+ #define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
+ #define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
+ #define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
+ #define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
+ #define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ u32 manufacture_time; /* 0x70 */
+ #define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
+ #define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
+ #define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
+ #define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
+ #define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
+ #define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ u32 led_global_settings; /* 0x74 */
+ #define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
+ #define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
+ #define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
+ #define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
+ #define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
+ #define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
+ #define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
+ #define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+ u32 generic_cont1; /* 0x78 */
+ #define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
+ #define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE0_SWAP_MASK 0x00000C00
+ #define NVM_CFG1_GLOB_LANE0_SWAP_OFFSET 10
+ #define NVM_CFG1_GLOB_LANE1_SWAP_MASK 0x00003000
+ #define NVM_CFG1_GLOB_LANE1_SWAP_OFFSET 12
+ #define NVM_CFG1_GLOB_LANE2_SWAP_MASK 0x0000C000
+ #define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
+ #define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
+ #define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+ u32 mbi_version; /* 0x7C */
+ #define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+ #define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+ #define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ u32 mbi_date; /* 0x80 */
+ u32 misc_sig; /* 0x84 */
/* Define the GPIO mapping to switch i2c mux */
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
- u32 device_capabilities; /* 0x88 */
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
- u32 power_dissipated; /* 0x8C */
-#define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0
-#define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8
-#define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16
-#define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000
-#define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24
- u32 power_consumed; /* 0x90 */
-#define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0
-#define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8
-#define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16
-#define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000
-#define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24
- u32 efi_version; /* 0x94 */
- u32 reserved[42]; /* 0x98 */
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ u32 device_capabilities; /* 0x88 */
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+ #define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+ u32 power_dissipated; /* 0x8C */
+ #define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0
+ #define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8
+ #define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16
+ #define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24
+ u32 power_consumed; /* 0x90 */
+ #define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0
+ #define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8
+ #define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16
+ #define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24
+ u32 efi_version; /* 0x94 */
+ u32 multi_network_modes_capability; /* 0x98 */
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X10G 0x1
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X25G 0x2
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X25G 0x4
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_4X25G 0x8
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_1X40G 0x10
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X40G 0x20
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \
+ 0x80
+ u32 reserved[41]; /* 0x9C */
};
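Each configuration dword above is decoded with its *_MASK/*_OFFSET pair: mask the raw value, then shift right by the offset. A minimal sketch of that pattern in C (the helper name, the stdint types and the local variables are illustrative only, not part of this header):

#include <stdint.h>

/* Illustrative helper: extract one bit-field from an NVM config dword
 * using a MASK/OFFSET pair such as those defined above.
 */
static inline uint32_t nvm_cfg_get_field(uint32_t dword, uint32_t mask,
					 uint32_t offset)
{
	return (dword & mask) >> offset;
}

/* Example use, assuming glob points at a struct nvm_cfg1_glob read from NVM:
 *
 *	uint32_t mf_mode = nvm_cfg_get_field(glob->generic_cont0,
 *					     NVM_CFG1_GLOB_MF_MODE_MASK,
 *					     NVM_CFG1_GLOB_MF_MODE_OFFSET);
 */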
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[30]; /* 0x0 */
};
struct nvm_cfg1_port {
- u32 reserved__m_relocated_to_option_123; /* 0x0 */
- u32 reserved__m_relocated_to_option_124; /* 0x4 */
- u32 generic_cont0; /* 0x8 */
-#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
-#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
-#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
-#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
-#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
-#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
-#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
-#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
-#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
-#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
-#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
-#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
-#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
-#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
-#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
-#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
-#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
-#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
- u32 pcie_cfg; /* 0xC */
-#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
-#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
- u32 features; /* 0x10 */
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
- u32 speed_cap_mask; /* 0x14 */
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
- u32 link_settings; /* 0x18 */
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
-#define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000
-#define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15
-#define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0
-#define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_NONE 0x0
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_FIRECODE 0x1
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_RS 0x2
- u32 phy_cfg; /* 0x1C */
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
-#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
-#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
-#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
-#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
-#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
-#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
-#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
-#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
-#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
- u32 mgmt_traffic; /* 0x20 */
-#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
-#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
- u32 ext_phy; /* 0x24 */
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
- u32 mba_cfg1; /* 0x28 */
-#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
-#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
-#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
- u32 mba_cfg2; /* 0x2C */
-#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
-#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
- u32 vf_cfg; /* 0x30 */
-#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
-#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
- struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
- u32 led_port_settings; /* 0x3C */
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
- u32 transceiver_00; /* 0x40 */
+ u32 reserved__m_relocated_to_option_123; /* 0x0 */
+ u32 reserved__m_relocated_to_option_124; /* 0x4 */
+ u32 generic_cont0; /* 0x8 */
+ #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_LED_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
+ #define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
+ #define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
+ #define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
+ #define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
+ #define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
+ #define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
+ #define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
+ #define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
+ #define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
+ #define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
+ #define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
+ #define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
+ #define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
+ #define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
+ #define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
+ #define NVM_CFG1_PORT_LED_MODE_BREAKOUT 0x10
+ #define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
+ #define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+ #define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+ #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+ #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+ #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ u32 pcie_cfg; /* 0xC */
+ #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
+ #define NVM_CFG1_PORT_RESERVED15_OFFSET 0
+ u32 features; /* 0x10 */
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
+ #define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
+ #define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
+ u32 speed_cap_mask; /* 0x14 */
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 link_settings; /* 0x18 */
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+ #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
+ #define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK \
+ 0x00004000
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED \
+ 0x0
+ #define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED \
+ 0x1
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0
+ #define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+ u32 phy_cfg; /* 0x1C */
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
+ #define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
+ #define NVM_CFG1_PORT_AN_MODE_OFFSET 24
+ #define NVM_CFG1_PORT_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_AN_MODE_CL73 0x1
+ #define NVM_CFG1_PORT_AN_MODE_CL37 0x2
+ #define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
+ #define NVM_CFG1_PORT_AN_MODE_BB_CL37_BAM 0x4
+ #define NVM_CFG1_PORT_AN_MODE_BB_HPAM 0x5
+ #define NVM_CFG1_PORT_AN_MODE_BB_SGMII 0x6
+ u32 mgmt_traffic; /* 0x20 */
+ #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
+ #define NVM_CFG1_PORT_RESERVED61_OFFSET 0
+ u32 ext_phy; /* 0x24 */
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ u32 mba_cfg1; /* 0x28 */
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
+ #define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
+ #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
+ #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
+ #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
+ #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
+ #define NVM_CFG1_PORT_RESERVED5_OFFSET 9
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK \
+ 0x00E00000
+ #define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
+ u32 mba_cfg2; /* 0x2C */
+ #define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_RESERVED65_OFFSET 0
+ #define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
+ #define NVM_CFG1_PORT_RESERVED66_OFFSET 16
+ u32 vf_cfg; /* 0x30 */
+ #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
+ #define NVM_CFG1_PORT_RESERVED8_OFFSET 0
+ #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
+ #define NVM_CFG1_PORT_RESERVED6_OFFSET 16
+ struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
+ u32 led_port_settings; /* 0x3C */
+ #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
+ #define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
+ #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
+ #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40
+ u32 transceiver_00; /* 0x40 */
/* Define for mapping of transceiver signal module absent */
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
+ #define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
/* Define the GPIO mux settings to switch i2c mux to this port */
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
- u32 device_ids; /* 0x44 */
-#define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF
-#define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000
-#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24
- u32 board_cfg; /* 0x48 */
- /* This field defines the board technology
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
+ #define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
+ u32 device_ids; /* 0x44 */
+ #define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF
+ #define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0
+ #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_FCOE_DID_SUFFIX_OFFSET 8
+ #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_ISCSI_DID_SUFFIX_OFFSET 16
+ #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24
+ u32 board_cfg; /* 0x48 */
+ /* This field defines the board technology
* (backpane,transceiver,external PHY)
*/
-#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
-#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
-#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
-#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF
+ #define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+ #define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
/* This field defines the GPIO mapped to tx_disable signal in SFP */
-#define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00
-#define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8
-#define NVM_CFG1_PORT_TX_DISABLE_NA 0x0
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F
-#define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20
- u32 reserved[131]; /* 0x4C */
+ #define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8
+ #define NVM_CFG1_PORT_TX_DISABLE_NA 0x0
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F
+ #define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20
+ u32 mnm_10g_cap; /* 0x4C */
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_10g_ctrl; /* 0x50 */
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_10G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_10G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_10G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_10g_misc; /* 0x54 */
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+ u32 mnm_25g_cap; /* 0x58 */
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_25g_ctrl; /* 0x5C */
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_25G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_25G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_25G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_25g_misc; /* 0x60 */
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+ u32 mnm_40g_cap; /* 0x64 */
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 mnm_40g_ctrl; /* 0x68 */
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_40G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_40G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_40G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_40g_misc; /* 0x6C */
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+ u32 mnm_50g_cap; /* 0x70 */
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_BB_100G \
+ 0x40
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_OFFSET \
+ 16
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
+ #define \
+ NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_BB_100G \
+ 0x40
+ u32 mnm_50g_ctrl; /* 0x74 */
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_50G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_50G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_50G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_50g_misc; /* 0x78 */
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+ u32 mnm_100g_cap; /* 0x7C */
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
+ 0x0000FFFF
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_BB_100G 0x40
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_MASK \
+ 0xFFFF0000
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_BB_100G 0x40
+ u32 mnm_100g_ctrl; /* 0x80 */
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_MASK 0x0000000F
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_SMARTLINQ 0x8
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_MASK 0x000000F0
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_OFFSET 4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_BB_100G 0x7
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_SMARTLINQ 0x8
+ /* This field defines the board technology
+ * (backpane,transceiver,external PHY)
+ */
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MASK 0x0000FF00
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_OFFSET 8
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_UNDEFINED 0x0
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE 0x1
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_BACKPLANE 0x2
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_EXT_PHY 0x3
+ #define NVM_CFG1_PORT_MNM_100G_PORT_TYPE_MODULE_SLAVE 0x4
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_MASK \
+ 0x00FF0000
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_OFFSET 16
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_BYPASS 0x0
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR 0x2
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR2 0x3
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_KR4 0x4
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XFI 0x8
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SFI 0x9
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_1000X 0xB
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_SGMII 0xC
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLAUI 0x11
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_XLPPI 0x12
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CAUI 0x21
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_CPPI 0x22
+ #define NVM_CFG1_PORT_MNM_100G_SERDES_NET_INTERFACE_25GAUI 0x31
+ #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_MASK 0xFF000000
+ #define NVM_CFG1_PORT_MNM_100G_ETH_DID_SUFFIX_OFFSET 24
+ u32 mnm_100g_misc; /* 0x84 */
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_MASK 0x00000007
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_OFFSET 0
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+ u32 reserved[116]; /* 0x88 */
};
struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address; /* 0x0 */
- u32 rsrv1; /* 0x8 */
-#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
- u32 rsrv2; /* 0xC */
-#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
- u32 device_id; /* 0x10 */
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
- u32 cmn_cfg; /* 0x14 */
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
-#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
-#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
-#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
- u32 pci_cfg; /* 0x18 */
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
-#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
-#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
-#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
-#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
-#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
-#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
-#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
-#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
-#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
-#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
-#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
-#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
-#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
-#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
-#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
-#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
-#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
-#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
-#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
- u32 preboot_generic_cfg; /* 0x2C */
-#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
-#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
-#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
- u32 reserved[8]; /* 0x30 */
+ struct nvm_cfg_mac_address mac_address; /* 0x0 */
+ u32 rsrv1; /* 0x8 */
+ #define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
+ u32 rsrv2; /* 0xC */
+ #define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
+ u32 device_id; /* 0x10 */
+ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
+ #define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
+ #define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
+ u32 cmn_cfg; /* 0x14 */
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
+ #define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
+ #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
+ #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
+ #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
+ #define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
+ #define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
+ #define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
+ #define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
+ #define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
+ #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
+ #define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
+ #define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
+ u32 pci_cfg; /* 0x18 */
+ #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
+ #define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
+ /* AH VF BAR2 size */
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_MASK 0x00003F80
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_OFFSET 7
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4K 0x1
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8K 0x2
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16K 0x3
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32K 0x4
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64K 0x5
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_128K 0x6
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_256K 0x7
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_512K 0x8
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_1M 0x9
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_2M 0xA
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_4M 0xB
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_8M 0xC
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_16M 0xD
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_32M 0xE
+ #define NVM_CFG1_FUNC_VF_PCI_BAR2_SIZE_64M 0xF
+ #define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
+ #define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
+ #define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
+ #define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
+ #define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
+ #define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
+ #define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
+ #define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
+ #define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
+ #define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
+ #define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
+ #define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
+ #define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
+ #define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
+ #define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
+ #define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
+ #define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
+ #define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
+ #define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
+ /* Hide function in npar mode */
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_MASK 0x04000000
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_OFFSET 26
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_FUNCTION_HIDE_ENABLED 0x1
+ /* AH BAR2 size (per function) */
+ #define NVM_CFG1_FUNC_BAR2_SIZE_MASK 0x78000000
+ #define NVM_CFG1_FUNC_BAR2_SIZE_OFFSET 27
+ #define NVM_CFG1_FUNC_BAR2_SIZE_DISABLED 0x0
+ #define NVM_CFG1_FUNC_BAR2_SIZE_1M 0x5
+ #define NVM_CFG1_FUNC_BAR2_SIZE_2M 0x6
+ #define NVM_CFG1_FUNC_BAR2_SIZE_4M 0x7
+ #define NVM_CFG1_FUNC_BAR2_SIZE_8M 0x8
+ #define NVM_CFG1_FUNC_BAR2_SIZE_16M 0x9
+ #define NVM_CFG1_FUNC_BAR2_SIZE_32M 0xA
+ #define NVM_CFG1_FUNC_BAR2_SIZE_64M 0xB
+ #define NVM_CFG1_FUNC_BAR2_SIZE_128M 0xC
+ #define NVM_CFG1_FUNC_BAR2_SIZE_256M 0xD
+ #define NVM_CFG1_FUNC_BAR2_SIZE_512M 0xE
+ #define NVM_CFG1_FUNC_BAR2_SIZE_1G 0xF
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
+ u32 preboot_generic_cfg; /* 0x2C */
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
+ #define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ u32 reserved[8]; /* 0x30 */
};
struct nvm_cfg1 {
- struct nvm_cfg1_glob glob; /* 0x0 */
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
+ struct nvm_cfg1_glob glob; /* 0x0 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
};
/******************************************
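Every option in the nvm_cfg1 structures above uses the same encoding: one or more fields are packed into a 32-bit word, and each field is described by a _MASK/_OFFSET pair plus a set of named values. A minimal decode sketch follows; the nvm_cfg_get_field() helper and the port pointer are illustrative only and not part of the qede sources.

    /* Decode one NVM option from its 32-bit word via its _MASK/_OFFSET pair. */
    static inline u32 nvm_cfg_get_field(u32 val, u32 mask, u32 offset)
    {
            return (val & mask) >> offset;
    }

    /* Example: driver-requested link speed, assuming port points at a
     * struct nvm_cfg1_port entry read back from NVM. */
    u32 speed = nvm_cfg_get_field(port->link_settings,
                                  NVM_CFG1_PORT_DRV_LINK_SPEED_MASK,
                                  NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET);
    /* speed then compares against NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG,
     * _1G, _10G, _25G, and so on. */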
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 3b25e1a8..ab886716 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -6,6 +6,14 @@
* See LICENSE.qede_pmd for copyright and licensing details.
*/
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
0
@@ -1105,3 +1113,31 @@
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16
#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL
+
+/* 8.10.9.0 FW */
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
+#define PRS_REG_SEARCH_ROCE 0x1f040cUL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define CCFC_REG_WEAK_ENABLE_VF 0x2e0704UL
+#define TCFC_REG_STRONG_ENABLE_VF 0x2d070cUL
+#define TCFC_REG_WEAK_ENABLE_VF 0x2d0704UL
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PGLUE_B_REG_MSDM_VF_SHIFT_B 0x2aa1c4UL
+#define PGLUE_B_REG_MSDM_OFFSET_MASK_B 0x2aa1c0UL
+#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST 0x1f0a0cUL
+#define PRS_REG_SEARCH_FCOE 0x1f0408UL
+#define PGLUE_B_REG_PGL_ADDR_E8_F0 0x2aaf98UL
+#define NIG_REG_DSCP_TO_TC_MAP_ENABLE 0x5088f8UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0 0x2aafa0UL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF 0x1f0430UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0 0x2aafa4UL
+#define IGU_REG_WRITE_DONE_PENDING 0x180900UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
+#define PRS_REG_MSG_INFO 0x1f0a1cUL
+#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
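The registers added above for the 8.10.9.0 firmware are absolute BAR0 offsets and are normally accessed through the PTT-based ecore helpers. A hedged one-liner follows; the ecore_rd() signature is assumed from the qede base code and is not shown in this patch.

    /* Illustrative only: read one of the newly added registers. */
    u32 vxlan_ctrl = ecore_rd(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL);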
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index b6f6487d..1ae6127b 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -40,8 +40,6 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
return rc;
}
- ecore_hw_start_fastpath(p_hwfn);
-
DP_VERBOSE(edev, ECORE_MSG_SPQ,
"Started V-PORT %d with MTU %d\n",
p_params->vport_id, p_params->mtu);
@@ -94,6 +92,7 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
+ sp_params.mtu = params->mtu;
/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
* We need to re-fix the rss values per engine for CMT.
@@ -144,8 +143,8 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t));
rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
ECORE_RSS_KEY_SIZE * sizeof(uint32_t));
+ sp_params.rss_params = &sp_rss_params;
}
- sp_params.rss_params = &sp_rss_params;
for_each_hwfn(edev, i) {
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
@@ -169,9 +168,9 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
static int
qed_start_rxq(struct ecore_dev *edev,
- uint8_t rss_id, uint8_t rx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index, uint16_t bd_max_bytes,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
@@ -179,28 +178,28 @@ qed_start_rxq(struct ecore_dev *edev,
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = rss_id % edev->num_hwfns;
+ hwfn_index = rss_num % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
+ p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
rc = ecore_sp_eth_rx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- rx_queue_id / edev->num_hwfns,
- vport_id,
- vport_id,
- sb,
- sb_index,
+ p_params,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
if (rc) {
- DP_ERR(edev, "Failed to start RXQ#%d\n", rx_queue_id);
+ DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
- rx_queue_id, rss_id, vport_id, sb);
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
return 0;
}
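With this change the RX queue start path takes a single struct ecore_queue_start_common_params instead of separate rss/queue/vport/SB arguments, and fills stats_id and the per-engine queue_id itself. A caller-side sketch under those assumptions (the local variables and the sb_idx field name are illustrative):

    struct ecore_queue_start_common_params params = { 0 };
    void OSAL_IOMEM *prod = NULL;
    int rc;

    params.queue_id = rx_queue_id;  /* global id; divided per hwfn above */
    params.vport_id = vport_id;
    params.sb = sb_id;
    params.sb_idx = sb_index;       /* field name assumed */

    rc = qed_ops->q_rx_start(edev, rss_num, &params, bd_max_bytes,
                             bd_chain_phys_addr, cqe_pbl_addr,
                             cqe_pbl_size, &prod);

The TX queue start in the following hunk uses the same convention, with an extra traffic-class argument passed as 0.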
@@ -227,35 +226,35 @@ qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
static int
qed_start_txq(struct ecore_dev *edev,
- uint8_t rss_id, uint16_t tx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = rss_id % edev->num_hwfns;
+ hwfn_index = rss_num % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
+ p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
rc = ecore_sp_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- tx_queue_id / edev->num_hwfns,
- vport_id,
- vport_id,
- sb,
- sb_index,
+ p_params,
+ 0 /* tc */,
pbl_addr, pbl_size, pp_doorbell);
if (rc) {
- DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id);
+ DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
- tx_queue_id, rss_id, vport_id, sb);
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
return 0;
}
@@ -294,6 +293,17 @@ static int qed_fastpath_stop(struct ecore_dev *edev)
return 0;
}
+static void qed_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
static void
qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
{
@@ -443,20 +453,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
+ INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
};
-uint32_t qed_get_protocol_version(enum qed_protocol protocol)
-{
- switch (protocol) {
- case QED_PROTOCOL_ETH:
- return QED_ETH_INTERFACE_VERSION;
- default:
- return 0;
- }
-}
-
const struct qed_eth_ops *qed_get_eth_ops(void)
{
return &qed_eth_ops_pass;
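ecore_hw_start_fastpath() is no longer run from qed_start_vport(); the new fastpath_start op calls it for every hwfn instead, so the upper layer can configure the vport and queues first and only then open the fastpath. A sketch of the resulting order, using the qed_ops table from qede_ethdev.c (arguments and error handling omitted):

    /* Bring-up order after this change:
     *   1. vport start                 (no longer opens the fastpath)
     *   2. q_rx_start()/q_tx_start()   for every configured queue
     *   3. fastpath start on all hwfns
     *   4. vport update                (MTU, RSS, accept flags)
     */
    qed_ops->fastpath_start(edev);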
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index 26968eb0..33655c39 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -46,7 +46,7 @@ struct qed_dev_eth_info {
uint8_t num_tc;
struct ether_addr port_mac;
- uint8_t num_vlan_filters;
+ uint16_t num_vlan_filters;
uint32_t num_mac_addrs;
};
@@ -75,6 +75,7 @@ struct qed_update_vport_params {
uint8_t accept_any_vlan;
uint8_t update_rss_flg;
struct qed_update_vport_rss_params rss_params;
+ uint16_t mtu;
};
struct qed_start_vport_params {
@@ -132,9 +133,9 @@ struct qed_eth_ops {
struct qed_update_vport_params *params);
int (*q_rx_start)(struct ecore_dev *cdev,
- uint8_t rss_id, uint8_t rx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index, uint16_t bd_max_bytes,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
@@ -143,9 +144,8 @@ struct qed_eth_ops {
struct qed_stop_rxq_params *params);
int (*q_tx_start)(struct ecore_dev *edev,
- uint8_t rss_id, uint16_t tx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
@@ -158,6 +158,8 @@ struct qed_eth_ops {
int (*fastpath_stop)(struct ecore_dev *edev);
+ void (*fastpath_start)(struct ecore_dev *edev);
+
void (*get_vport_stats)(struct ecore_dev *edev,
struct ecore_eth_stats *stats);
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 82e44b8f..d106dd0f 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -8,6 +8,7 @@
#include "qede_ethdev.h"
#include <rte_alarm.h>
+#include <rte_version.h>
/* Globals */
static const struct qed_eth_ops *qed_ops;
@@ -159,6 +160,15 @@ static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
};
+static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
+ {"rx_q_segments",
+ offsetof(struct qede_rx_queue, rx_segs)},
+ {"rx_q_hw_errors",
+ offsetof(struct qede_rx_queue, rx_hw_errors)},
+ {"rx_q_allocation_errors",
+ offsetof(struct qede_rx_queue, rx_alloc_errors)}
+};
+
static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
{
ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
@@ -188,31 +198,28 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
struct qed_dev_info *info = &qdev->dev_info.common;
- static char ver_str[QED_DRV_VER_STR_SIZE];
+ static char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
+ static char ver_str[QEDE_PMD_DRV_VER_STR_SIZE];
DP_INFO(edev, "*********************************\n");
+ DP_INFO(edev, " DPDK version:%s\n", rte_version());
DP_INFO(edev, " Chip details : %s%d\n",
- ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
-
- sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX,
- edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR,
- QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH);
- strcpy(qdev->drv_ver, ver_str);
- DP_INFO(edev, " Driver version : %s\n", ver_str);
-
- sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor,
- info->fw_rev, info->fw_eng);
+ ECORE_IS_BB(edev) ? "BB" : "AH",
+ CHIP_REV_IS_A0(edev) ? 0 : 1);
+ snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
+ info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
+ snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
+ ver_str, QEDE_PMD_VERSION);
+ DP_INFO(edev, " Driver version : %s\n", drv_ver);
DP_INFO(edev, " Firmware version : %s\n", ver_str);
- sprintf(ver_str, "%d.%d.%d.%d",
+ snprintf(ver_str, MCP_DRV_VER_STR_SIZE,
+ "%d.%d.%d.%d",
(info->mfw_rev >> 24) & 0xff,
(info->mfw_rev >> 16) & 0xff,
(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
- DP_INFO(edev, " Management firmware version : %s\n", ver_str);
-
+ DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
DP_INFO(edev, " Firmware file : %s\n", fw_file);
-
DP_INFO(edev, "*********************************\n");
}
@@ -348,52 +355,6 @@ static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
}
}
-void qede_config_rx_mode(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- /* TODO: - QED_FILTER_TYPE_UCAST */
- enum qed_filter_rx_mode_type accept_flags =
- QED_FILTER_RX_MODE_TYPE_REGULAR;
- struct qed_filter_params rx_mode;
- int rc;
-
- /* Configure the struct for the Rx mode */
- memset(&rx_mode, 0, sizeof(struct qed_filter_params));
- rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
- rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE,
- eth_dev->data->mac_addrs[0].addr_bytes);
- if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
- accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
- } else {
- rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
- eth_dev->data->
- mac_addrs[0].addr_bytes);
- if (rc) {
- DP_ERR(edev, "Unable to add filter\n");
- return;
- }
- }
-
- /* take care of VLAN mode */
- if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) {
- qede_config_accept_any_vlan(qdev, true);
- } else if (!qdev->non_configured_vlans) {
- /* If we dont have non-configured VLANs and promisc
- * is not set, then check if we need to disable
- * accept_any_vlan mode.
- * Because in this case, accept_any_vlan mode is set
- * as part of IFF_RPOMISC flag handling.
- */
- qede_config_accept_any_vlan(qdev, false);
- }
- rx_mode.filter.accept_flags = accept_flags;
- rc = qdev->ops->filter_config(edev, &rx_mode);
- if (rc)
- DP_ERR(edev, "Filter config failed rc=%d\n", rc);
-}
-
static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
{
struct qed_update_vport_params vport_update_params;
@@ -418,16 +379,40 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
if (mask & ETH_VLAN_STRIP_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (rxmode->hw_vlan_strip)
(void)qede_vlan_stripping(eth_dev, 1);
else
(void)qede_vlan_stripping(eth_dev, 0);
}
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n",
- mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip);
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filtering kicks in when a VLAN is added */
+ if (rxmode->hw_vlan_filter) {
+ qede_vlan_filter_set(eth_dev, 0, 1);
+ } else {
+ if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+ DP_NOTICE(edev, false,
+ " Please remove existing VLAN filters"
+ " before disabling VLAN filtering\n");
+ /* Signal app that VLAN filtering is still
+ * enabled
+ */
+ rxmode->hw_vlan_filter = true;
+ } else {
+ qede_vlan_filter_set(eth_dev, 0, 0);
+ }
+ }
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
+ " and classification is based on outer tag only\n");
+
+ DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
+ mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}
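
The VLAN-offload handler above refuses to disable filtering while VLAN filters are still programmed and reflects that back through rxmode->hw_vlan_filter. A minimal application-side sketch, assuming only the generic DPDK 16.11 ethdev calls; port id 0 and VLAN id 100 are arbitrary illustration values, not part of this patch:

#include <rte_ethdev.h>

static int example_vlan_setup(uint8_t port_id)
{
        int ret;

        /* Request stripping + filtering; the ethdev layer translates this
         * into the ETH_VLAN_*_MASK bits handled by qede_vlan_offload_set().
         */
        ret = rte_eth_dev_set_vlan_offload(port_id,
                        ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
        if (ret != 0)
                return ret;

        /* Add a filter entry; qede_vlan_filter_set() rejects duplicates
         * with -EEXIST and caps the count at dev_info.num_vlan_filters.
         */
        ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
        if (ret != 0)
                return ret;

        /* Remove it again; filtering can only be disabled once all
         * configured VLANs have been removed.
         */
        return rte_eth_dev_vlan_filter(port_id, 100, 0);
}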
static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
@@ -452,85 +437,137 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct qed_dev_eth_info *dev_info = &qdev->dev_info;
+ struct qede_vlan_entry *tmp = NULL;
+ struct qede_vlan_entry *vlan;
int rc;
- if (vlan_id != 0 &&
- qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_NOTICE(edev, false, "Reached max VLAN filter limit"
- " enabling accept_any_vlan\n");
- qede_config_accept_any_vlan(qdev, true);
- return 0;
- }
-
if (on) {
+ if (qdev->configured_vlans == dev_info->num_vlan_filters) {
+ DP_INFO(edev, "Reached max VLAN filter limit"
+ " enabling accept_any_vlan\n");
+ qede_config_accept_any_vlan(qdev, true);
+ return 0;
+ }
+
+ SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+ if (tmp->vid == vlan_id) {
+ DP_ERR(edev, "VLAN %u already configured\n",
+ vlan_id);
+ return -EEXIST;
+ }
+ }
+
+ vlan = rte_malloc(NULL, sizeof(struct qede_vlan_entry),
+ RTE_CACHE_LINE_SIZE);
+
+ if (!vlan) {
+ DP_ERR(edev, "Did not allocate memory for VLAN\n");
+ return -ENOMEM;
+ }
+
rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
vlan_id);
- if (rc)
+ if (rc) {
DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
rc);
- else
- if (vlan_id != 0)
- qdev->configured_vlans++;
+ rte_free(vlan);
+ } else {
+ vlan->vid = vlan_id;
+ SLIST_INSERT_HEAD(&qdev->vlan_list_head, vlan, list);
+ qdev->configured_vlans++;
+ DP_INFO(edev, "VLAN %u added, configured_vlans %u\n",
+ vlan_id, qdev->configured_vlans);
+ }
} else {
+ SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) {
+ if (tmp->vid == vlan_id)
+ break;
+ }
+
+ if (!tmp) {
+ if (qdev->configured_vlans == 0) {
+ DP_INFO(edev,
+ "No VLAN filters configured yet\n");
+ return 0;
+ }
+
+ DP_ERR(edev, "VLAN %u not configured\n", vlan_id);
+ return -EINVAL;
+ }
+
+ SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
+
rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
vlan_id);
- if (rc)
+ if (rc) {
DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
vlan_id, rc);
- else
- if (vlan_id != 0)
- qdev->configured_vlans--;
+ } else {
+ qdev->configured_vlans--;
+ DP_INFO(edev, "VLAN %u removed configured_vlans %u\n",
+ vlan_id, qdev->configured_vlans);
+ }
}
- DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n",
- vlan_id, on, rc, qdev->configured_vlans);
-
return rc;
}
+static int qede_init_vport(struct qede_dev *qdev)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ struct qed_start_vport_params start = {0};
+ int rc;
+
+ start.remove_inner_vlan = 1;
+ start.gro_enable = 0;
+ start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+ start.vport_id = 0;
+ start.drop_ttl0 = false;
+ start.clear_stats = 1;
+ start.handle_ptp_pkts = 0;
+
+ rc = qdev->ops->vport_start(edev, &start);
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+
+ DP_INFO(edev,
+ "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
+ start.vport_id, ETHER_MTU);
+
+ return 0;
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+ int rc, i, j;
PMD_INIT_FUNC_TRACE(edev);
- if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) {
- DP_NOTICE(edev, false,
- "Unequal number of rx/tx queues "
- "is not supported RX=%u TX=%u\n",
- eth_dev->data->nb_rx_queues,
- eth_dev->data->nb_tx_queues);
- return -EINVAL;
- }
-
/* Check requirements for 100G mode */
if (edev->num_hwfns > 1) {
- if (eth_dev->data->nb_rx_queues < 2) {
+ if (eth_dev->data->nb_rx_queues < 2 ||
+ eth_dev->data->nb_tx_queues < 2) {
DP_NOTICE(edev, false,
- "100G mode requires minimum two queues\n");
+ "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
- if ((eth_dev->data->nb_rx_queues % 2) != 0) {
+ if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_NOTICE(edev, false,
- "100G mode requires even number of queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
- qdev->num_rss = eth_dev->data->nb_rx_queues;
-
- /* Initial state */
- qdev->state = QEDE_CLOSE;
-
/* Sanity checks and throw warnings */
-
- if (rxmode->enable_scatter == 1) {
- DP_ERR(edev, "RX scatter packets is not supported\n");
- return -EINVAL;
- }
+ if (rxmode->enable_scatter == 1)
+ eth_dev->data->scattered_rx = 1;
if (rxmode->enable_lro == 1) {
DP_INFO(edev, "LRO is not supported\n");
@@ -544,16 +581,48 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
+ /* Check for the port restart case */
+ if (qdev->state != QEDE_DEV_INIT) {
+ rc = qdev->ops->vport_stop(edev, 0);
+ if (rc != 0)
+ return rc;
+ qede_dealloc_fp_resc(eth_dev);
+ }
- DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_RSS_CNT(qdev), qdev->num_tc);
+ qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
+ qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
+ qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
- DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u"
- " port %u first_on_engine %d\n",
- edev->hwfns[0].my_id,
- edev->hwfns[0].rel_pf_id,
- edev->hwfns[0].abs_pf_id,
- edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine);
+ /* Fastpath status block should be initialized before sending
+ * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+ */
+ rc = qede_alloc_fp_resc(qdev);
+ if (rc != 0)
+ return rc;
+
+ /* Issue VPORT-START with default config values to allow
+ * other port configurations early on.
+ */
+ rc = qede_init_vport(qdev);
+ if (rc != 0)
+ return rc;
+
+ SLIST_INIT(&qdev->vlan_list_head);
+
+ /* Add primary mac for PF */
+ if (IS_PF(edev))
+ qede_mac_addr_set(eth_dev, &qdev->primary_mac);
+
+ /* Enable VLAN offloads by default */
+ qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
+
+ qdev->state = QEDE_DEV_CONFIG;
+
+ DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
+ (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
+ qdev->num_tc);
return 0;
}
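
Taken together, the configure path above now follows a fixed order. A condensed sketch of that order (error handling trimmed, function names as introduced by this patch; this is an outline, not the full implementation):

static int qede_dev_configure_outline(struct rte_eth_dev *eth_dev)
{
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        if (qdev->state != QEDE_DEV_INIT) {     /* port restart case */
                qdev->ops->vport_stop(edev, 0);
                qede_dealloc_fp_resc(eth_dev);
        }
        qede_alloc_fp_resc(qdev);   /* status blocks before VPORT-START */
        qede_init_vport(qdev);      /* defaults; MTU/RSS come via vport-update */
        SLIST_INIT(&qdev->vlan_list_head);
        qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
                              ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK);
        qdev->state = QEDE_DEV_CONFIG;
        return 0;
}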
@@ -577,6 +646,8 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct qed_link_output link;
+ uint32_t speed_cap = 0;
PMD_INIT_FUNC_TRACE(edev);
@@ -609,7 +680,21 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
- dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+ memset(&link, 0, sizeof(struct qed_link_output));
+ qdev->ops->common->get_link(edev, &link);
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ speed_cap |= ETH_LINK_SPEED_1G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ speed_cap |= ETH_LINK_SPEED_10G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ speed_cap |= ETH_LINK_SPEED_25G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ speed_cap |= ETH_LINK_SPEED_40G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ speed_cap |= ETH_LINK_SPEED_50G;
+ if (link.adv_speed & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ speed_cap |= ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = speed_cap;
}
/* return 0 means link status changed, -1 means not changed */
@@ -724,8 +809,9 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
@@ -734,16 +820,16 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
- if (qdev->state != QEDE_STOP)
+ if (qdev->state != QEDE_DEV_STOP)
qede_dev_stop(eth_dev);
else
DP_INFO(edev, "Device is already stopped\n");
- qede_free_mem_load(qdev);
-
- qede_free_fp_arrays(qdev);
+ rc = qdev->ops->vport_stop(edev, 0);
+ if (rc != 0)
+ DP_ERR(edev, "Failed to stop VPORT\n");
- qede_dev_set_link_state(eth_dev, false);
+ qede_dealloc_fp_resc(eth_dev);
qdev->ops->common->slowpath_stop(edev);
@@ -757,7 +843,7 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
if (edev->num_hwfns > 1)
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
- qdev->state = QEDE_CLOSE;
+ qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}
static void
@@ -766,6 +852,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
+ unsigned int i = 0, j = 0, qid;
+ struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
@@ -796,20 +884,73 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
stats.tx_mcast_bytes + stats.tx_bcast_bytes;
eth_stats->oerrors = stats.tx_err_drop_pkts;
+
+ /* Queue stats */
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
+ }
+
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+ txq = qdev->fp_array[(qid)].txqs[0];
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
+ }
+ }
+}
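
The per-queue reads above go through offsetof() so the same pattern can be driven from the xstats string/offset tables. A self-contained illustration of why the byte-offset form and a direct member access yield the same value (stand-in struct, not driver code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct rxq_counters {            /* stand-in for struct qede_rx_queue */
        uint64_t rcv_pkts;
        uint64_t rx_hw_errors;
        uint64_t rx_alloc_errors;
};

int main(void)
{
        struct rxq_counters q = { .rcv_pkts = 42 };
        uint64_t via_offset = *(uint64_t *)((char *)&q +
                        offsetof(struct rxq_counters, rcv_pkts));

        /* Reading through the offset is equivalent to q.rcv_pkts */
        assert(via_offset == q.rcv_pkts);
        return 0;
}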
+
+static unsigned
+qede_get_xstats_count(struct qede_dev *qdev) {
+ return RTE_DIM(qede_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
}
static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
- unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings);
+ struct qede_dev *qdev = dev->data->dev_private;
+ const unsigned int stat_cnt = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
- if (xstats_names != NULL)
- for (i = 0; i < stat_cnt; i++)
- snprintf(xstats_names[i].name,
- sizeof(xstats_names[i].name),
+ if (xstats_names != NULL) {
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
"%s",
qede_xstats_strings[i].name);
+ stat_idx++;
+ }
+
+ for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%.4s%d%s",
+ qede_rxq_xstats_strings[i].name, qid,
+ qede_rxq_xstats_strings[i].name + 4);
+ stat_idx++;
+ }
+ }
+ }
return stat_cnt;
}
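
The "%.4s%d%s" format above splices the queue index into the per-queue stat name after its fourth character. A small standalone example of the resulting names:

#include <stdio.h>

int main(void)
{
        const char *name = "rx_q_segments";   /* entry from qede_rxq_xstats_strings */
        char out[64];

        /* "%.4s" -> "rx_q", then the queue id, then the rest of the name */
        snprintf(out, sizeof(out), "%.4s%d%s", name, 0, name + 4);
        printf("%s\n", out);                  /* prints "rx_q0_segments" */
        return 0;
}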
@@ -821,18 +962,32 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
- unsigned int num = RTE_DIM(qede_xstats_strings);
+ const unsigned int num = qede_get_xstats_count(qdev);
+ unsigned int i, qid, stat_idx = 0;
if (n < num)
return num;
qdev->ops->get_vport_stats(edev, &stats);
- for (num = 0; num < n; num++)
- xstats[num].value = *(u64 *)(((char *)&stats) +
- qede_xstats_strings[num].offset);
+ for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
+ qede_xstats_strings[i].offset);
+ stat_idx++;
+ }
+
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
+ xstats[stat_idx].value = *(uint64_t *)(
+ ((char *)(qdev->fp_array[(qid)].rxq)) +
+ qede_rxq_xstats_strings[i].offset);
+ stat_idx++;
+ }
+ }
+ }
- return num;
+ return stat_idx;
}
static void
@@ -975,42 +1130,51 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
-int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf)
+void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+{
+ *rss_caps = 0;
+ *rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+}
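
A short driver-internal usage sketch of the helper above, translating an application's rss_hf mask into the ecore capability byte (illustrative values only):

static uint8_t example_rss_caps_from_hf(uint64_t hf)
{
        uint8_t caps;

        qede_init_rss_caps(&caps, hf);
        /* e.g. hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP
         *   => caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV4_TCP
         */
        return caps;
}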
+
+static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
{
struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- uint8_t rss_caps;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
int i;
- if (hf == 0)
- DP_ERR(edev, "hash function 0 will disable RSS\n");
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
- rss_caps = 0;
- rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
- rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
- rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
- rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
- rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
- rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ if (hf != 0) {
+ /* Enable RSS */
+ qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
+ memcpy(&vport_update_params.rss_params, &qdev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ if (key)
+ memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
+ rss_conf->rss_key_len);
+ vport_update_params.update_rss_flg = 1;
+ qdev->rss_enabled = 1;
+ } else {
+ /* Disable RSS */
+ qdev->rss_enabled = 0;
+ }
/* If the mapping doesn't fit any supported, return */
- if (rss_caps == 0 && hf != 0)
+ if (qdev->rss_params.rss_caps == 0 && hf != 0)
return -EINVAL;
- memset(&vport_update_params, 0, sizeof(vport_update_params));
-
- if (key != NULL)
- memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
+ DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
+ "Enabling RSS" : "Disabling RSS");
- qdev->rss_params.rss_caps = rss_caps;
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
- vport_update_params.update_rss_flg = 1;
vport_update_params.vport_id = 0;
return qdev->ops->vport_update(edev, &vport_update_params);
@@ -1048,9 +1212,9 @@ int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
return 0;
}
-int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -1225,7 +1389,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
struct ecore_dev *edev;
struct qed_dev_eth_info dev_info;
struct qed_slowpath_params params;
- uint32_t qed_ver;
static bool do_once = true;
uint8_t bulletin_change;
uint8_t vf_mac[ETHER_ADDR_LEN];
@@ -1261,8 +1424,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
rte_eth_copy_pci_info(eth_dev, pci_dev);
- qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
-
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
@@ -1292,17 +1453,18 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Start the Slowpath-process */
memset(&params, 0, sizeof(struct qed_slowpath_params));
params.int_mode = ECORE_INT_MODE_MSIX;
- params.drv_major = QEDE_MAJOR_VERSION;
- params.drv_minor = QEDE_MINOR_VERSION;
- params.drv_rev = QEDE_REVISION_VERSION;
- params.drv_eng = QEDE_ENGINEERING_VERSION;
- strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ params.drv_major = QEDE_PMD_VERSION_MAJOR;
+ params.drv_minor = QEDE_PMD_VERSION_MINOR;
+ params.drv_rev = QEDE_PMD_VERSION_REVISION;
+ params.drv_eng = QEDE_PMD_VERSION_PATCH;
+ strncpy((char *)params.name, QEDE_PMD_VER_PREFIX,
+ QEDE_PMD_DRV_VER_STR_SIZE);
/* For CMT mode device do periodic polling for slowpath events.
* This is required since uio device uses only one MSI-x
* interrupt vector but we need one for each engine.
*/
- if (edev->num_hwfns > 1) {
+ if (edev->num_hwfns > 1 && IS_PF(edev)) {
rc = rte_eal_alarm_set(timer_period * US_PER_S,
qede_poll_sp_sb_cb,
(void *)eth_dev);
@@ -1333,7 +1495,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_alloc_etherdev(adapter, &dev_info);
- adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION);
+ adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
if (!is_vf)
adapter->dev_info.num_mac_addrs =
@@ -1393,6 +1555,8 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
do_once = false;
}
+ adapter->state = QEDE_DEV_INIT;
+
DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
adapter->primary_mac.addr_bytes[0],
adapter->primary_mac.addr_bytes[1],
@@ -1478,11 +1642,12 @@ static struct rte_pci_id pci_id_qede_map[] = {
static struct eth_driver rte_qedevf_pmd = {
.pci_drv = {
- .name = "rte_qedevf_pmd",
.id_table = pci_id_qedevf_map,
.drv_flags =
RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- },
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
+ },
.eth_dev_init = qedevf_eth_dev_init,
.eth_dev_uninit = qedevf_eth_dev_uninit,
.dev_private_size = sizeof(struct qede_dev),
@@ -1490,45 +1655,18 @@ static struct eth_driver rte_qedevf_pmd = {
static struct eth_driver rte_qede_pmd = {
.pci_drv = {
- .name = "rte_qede_pmd",
.id_table = pci_id_qede_map,
.drv_flags =
RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- },
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
+ },
.eth_dev_init = qede_eth_dev_init,
.eth_dev_uninit = qede_eth_dev_uninit,
.dev_private_size = sizeof(struct qede_dev),
};
-static int
-rte_qedevf_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- rte_eth_driver_register(&rte_qedevf_pmd);
-
- return 0;
-}
-
-static int
-rte_qede_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
-{
- rte_eth_driver_register(&rte_qede_pmd);
-
- return 0;
-}
-
-static struct rte_driver rte_qedevf_driver = {
- .type = PMD_PDEV,
- .init = rte_qede_pmd_init
-};
-
-static struct rte_driver rte_qede_driver = {
- .type = PMD_PDEV,
- .init = rte_qedevf_pmd_init
-};
-
-PMD_REGISTER_DRIVER(rte_qede_driver, qede);
-DRIVER_REGISTER_PCI_TABLE(qede, pci_id_qede_map);
-PMD_REGISTER_DRIVER(rte_qedevf_driver, qedevf);
-DRIVER_REGISTER_PCI_TABLE(qedevf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index abb33af1..a97e3d96 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -10,6 +10,8 @@
#ifndef _QEDE_ETHDEV_H_
#define _QEDE_ETHDEV_H_
+#include <sys/queue.h>
+
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
@@ -27,6 +29,8 @@
#include "base/ecore_hsi_eth.h"
#include "base/ecore_dev_api.h"
#include "base/ecore_iov_api.h"
+#include "base/ecore_cxt.h"
+#include "base/nvm_cfg.h"
#include "qede_logs.h"
#include "qede_if.h"
@@ -40,19 +44,18 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 1
-#define QEDE_PMD_VERSION_MINOR 1
+#define QEDE_PMD_VERSION_MINOR 2
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
-#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
-#define QEDE_REVISION_VERSION 9
-#define QEDE_ENGINEERING_VERSION 0
+#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
+ qede_stringify(QEDE_PMD_VERSION_MINOR) "." \
+ qede_stringify(QEDE_PMD_VERSION_REVISION) "." \
+ qede_stringify(QEDE_PMD_VERSION_PATCH)
+
+#define QEDE_PMD_DRV_VER_STR_SIZE NAME_SIZE
+#define QEDE_PMD_VER_PREFIX "QEDE PMD"
-#define QEDE_DRV_MODULE_VERSION qede_stringify(QEDE_MAJOR_VERSION) "." \
- qede_stringify(QEDE_MINOR_VERSION) "." \
- qede_stringify(QEDE_REVISION_VERSION) "." \
- qede_stringify(QEDE_ENGINEERING_VERSION)
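
For reference, with the version numbers above (1, 2, 0, 1) the new macro expands to the literal "1.2.0.1", assuming qede_stringify() is the usual two-level stringification helper already used by the driver:

/* Illustration only - not part of the patch */
#define example_stringify1(x)   #x
#define example_stringify(x)    example_stringify1(x)
/* example_stringify(QEDE_PMD_VERSION_MAJOR) -> "1", so QEDE_PMD_VERSION
 * becomes "1" "." "2" "." "0" "." "1", i.e. "1.2.0.1", which is what
 * qede_print_adapter_info() appends to the firmware version string.
 */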
#define QEDE_RSS_INDIR_INITED (1 << 0)
#define QEDE_RSS_KEY_INITED (1 << 1)
@@ -62,8 +65,13 @@
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \
(edev)->dev_info.num_tc)
-#define QEDE_RSS_CNT(edev) ((edev)->num_rss)
-#define QEDE_TSS_CNT(edev) ((edev)->num_rss * (edev)->num_tc)
+#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_queues - (qdev)->fp_num_tx)
+#define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
+ (qdev)->num_tc)
+
+#define QEDE_FASTPATH_TX (1 << 0)
+#define QEDE_FASTPATH_RX (1 << 1)
#define QEDE_DUPLEX_FULL 1
#define QEDE_DUPLEX_HALF 2
@@ -103,22 +111,16 @@
extern char fw_file[];
/* Port/function states */
-enum dev_state {
- QEDE_START,
- QEDE_STOP,
- QEDE_CLOSE
+enum qede_dev_state {
+ QEDE_DEV_INIT, /* Init the chip and Slowpath */
+ QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
+ QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
+ QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
};
-struct qed_int_param {
- uint32_t int_mode;
- uint8_t num_vectors;
- uint8_t min_msix_cnt;
-};
-
-struct qed_int_params {
- struct qed_int_param in;
- struct qed_int_param out;
- bool fp_initialized;
+struct qede_vlan_entry {
+ SLIST_ENTRY(qede_vlan_entry) list;
+ uint16_t vid;
};
/*
@@ -131,32 +133,40 @@ struct qede_dev {
struct qed_dev_eth_info dev_info;
struct ecore_sb_info *sb_array;
struct qede_fastpath *fp_array;
- uint16_t num_rss;
uint8_t num_tc;
uint16_t mtu;
bool rss_enabled;
struct qed_update_vport_rss_params rss_params;
uint32_t flags;
bool gro_disable;
- struct qede_rx_queue **rx_queues;
- struct qede_tx_queue **tx_queues;
- enum dev_state state;
-
- /* Vlans */
- osal_list_t vlan_list;
+ uint16_t num_queues;
+ uint8_t fp_num_tx;
+ uint8_t fp_num_rx;
+ enum qede_dev_state state;
+ SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head;
uint16_t configured_vlans;
- uint16_t non_configured_vlans;
bool accept_any_vlan;
- uint16_t vxlan_dst_port;
-
struct ether_addr primary_mac;
bool handle_hw_err;
- char drv_ver[QED_DRV_VER_STR_SIZE];
+ char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
};
+/* Static functions */
+static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, int on);
+
+static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+
+/* Non-static functions */
+void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf);
+
int qed_fill_eth_dev_info(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
-void qede_config_rx_mode(struct rte_eth_dev *eth_dev);
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 1b05ff86..2131fe2a 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -70,20 +70,20 @@ struct qed_link_output {
uint32_t advertised_caps; /* In ADVERTISED defs */
uint32_t lp_caps; /* In ADVERTISED defs */
uint32_t speed; /* In Mb/s */
+ uint32_t adv_speed; /* Speed mask */
uint8_t duplex; /* In DUPLEX defs */
uint8_t port; /* In PORT defs */
bool autoneg;
uint32_t pause_config;
};
-#define QED_DRV_VER_STR_SIZE 80
struct qed_slowpath_params {
uint32_t int_mode;
uint8_t drv_major;
uint8_t drv_minor;
uint8_t drv_rev;
uint8_t drv_eng;
- uint8_t name[QED_DRV_VER_STR_SIZE];
+ uint8_t name[NAME_SIZE];
};
#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
@@ -152,13 +152,4 @@ struct qed_common_ops {
uint32_t dp_module, uint8_t dp_level);
};
-/**
- * @brief qed_get_protocol_version
- *
- * @param protocol
- *
- * @return version supported by qed for given protocol driver
- */
-uint32_t qed_get_protocol_version(enum qed_protocol protocol);
-
#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 73608c69..ab22409a 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -6,11 +6,8 @@
* See LICENSE.qede_pmd for copyright and licensing details.
*/
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <zlib.h>
#include <limits.h>
+#include <time.h>
#include <rte_alarm.h>
#include "qede_ethdev.h"
@@ -20,12 +17,11 @@ static uint8_t npar_tx_switching = 1;
/* Alarm timeout. */
#define QEDE_ALARM_TIMEOUT_US 100000
-#define CONFIG_QED_BINARY_FW
/* Global variable to hold absolute path of fw file */
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.10.9.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -49,6 +45,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
enum qed_protocol protocol, uint32_t dp_module,
uint8_t dp_level, bool is_vf)
{
+ struct ecore_hw_prepare_params hw_prepare_params;
struct qede_dev *qdev = (struct qede_dev *)edev;
int rc;
@@ -56,11 +53,16 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
qdev->protocol = protocol;
if (is_vf) {
edev->b_is_vf = true;
- edev->sriov_info.b_hw_channel = true;
+ edev->b_hw_channel = true; /* @DPDK */
}
ecore_init_dp(edev, dp_module, dp_level, NULL);
qed_init_pci(edev, pci_dev);
- rc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT);
+
+ memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
+ hw_prepare_params.personality = ECORE_PCI_ETH;
+ hw_prepare_params.drv_resc_alloc = false;
+ hw_prepare_params.chk_reg_fifo = false;
+ rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
DP_ERR(edev, "hw prepare failed\n");
return rc;
@@ -83,6 +85,7 @@ static int qed_nic_setup(struct ecore_dev *edev)
return rc;
}
+#ifdef CONFIG_ECORE_ZIPPED_FW
static int qed_alloc_stream_mem(struct ecore_dev *edev)
{
int i;
@@ -112,7 +115,9 @@ static void qed_free_stream_mem(struct ecore_dev *edev)
OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream);
}
}
+#endif
+#ifdef CONFIG_ECORE_BINARY_FW
static int qed_load_firmware_data(struct ecore_dev *edev)
{
int fd;
@@ -158,6 +163,7 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
return 0;
}
+#endif
static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn)
{
@@ -216,13 +222,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
const uint8_t *data = NULL;
struct ecore_hwfn *hwfn;
struct ecore_mcp_drv_version drv_version;
+ struct ecore_hw_init_params hw_init_params;
struct qede_dev *qdev = (struct qede_dev *)edev;
int rc;
#ifdef QED_ENC_SUPPORTED
struct ecore_tunn_start_params tunn_info;
#endif
-#ifdef CONFIG_QED_BINARY_FW
+#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev)) {
rc = qed_load_firmware_data(edev);
if (rc) {
@@ -240,7 +247,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* set int_coalescing_mode */
edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;
- /* Should go with CONFIG_QED_BINARY_FW */
+#ifdef CONFIG_ECORE_ZIPPED_FW
if (IS_PF(edev)) {
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(edev);
@@ -252,14 +259,17 @@ static int qed_slowpath_start(struct ecore_dev *edev,
}
qed_start_iov_task(edev);
+#endif
- /* Start the slowpath */
-#ifdef CONFIG_QED_BINARY_FW
+#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev))
- data = edev->firmware;
+ data = (const uint8_t *)edev->firmware + sizeof(u32);
#endif
+
allow_npar_tx_switching = npar_tx_switching ? true : false;
+ /* Start the slowpath */
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
#ifdef QED_ENC_SUPPORTED
memset(&tunn_info, 0, sizeof(tunn_info));
tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
@@ -269,12 +279,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
- rc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX,
- allow_npar_tx_switching, data);
-#else
- rc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX,
- allow_npar_tx_switching, data);
+ hw_init_params.p_tunn = &tunn_info;
#endif
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
+ hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
+ hw_init_params.bin_fw_data = data;
+ hw_init_params.epoch = (u32)time(NULL);
+ rc = ecore_hw_init(edev, &hw_init_params);
if (rc) {
DP_ERR(edev, "ecore_hw_init failed\n");
goto err2;
@@ -307,7 +319,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
err2:
ecore_resc_free(edev);
err:
-#ifdef CONFIG_QED_BINARY_FW
+#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev)) {
if (edev->firmware)
rte_free(edev->firmware);
@@ -346,7 +358,7 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
if (IS_PF(edev)) {
ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
if (ptt) {
- ecore_mcp_get_mfw_ver(edev, ptt,
+ ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
&dev_info->mfw_rev, NULL);
ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt,
@@ -361,7 +373,8 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt);
}
} else {
- ecore_mcp_get_mfw_ver(edev, ptt, &dev_info->mfw_rev, NULL);
+ ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
+ &dev_info->mfw_rev, NULL);
}
return 0;
@@ -371,6 +384,7 @@ int
qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
{
struct qede_dev *qdev = (struct qede_dev *)edev;
+ uint8_t queues = 0;
int i;
memset(info, 0, sizeof(*info));
@@ -378,20 +392,36 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
info->num_tc = 1 /* @@@TBD aelior MULTI_COS */;
if (IS_PF(edev)) {
+ int max_vf_vlan_filters = 0;
+
info->num_queues = 0;
for_each_hwfn(edev, i)
info->num_queues +=
FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE);
- info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN);
+ if (edev->p_iov_info)
+ max_vf_vlan_filters = edev->p_iov_info->total_vfs *
+ ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN) -
+ max_vf_vlan_filters;
rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
} else {
- ecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues);
+ ecore_vf_get_num_rxqs(ECORE_LEADING_HWFN(edev),
+ &info->num_queues);
+ if (edev->num_hwfns > 1) {
+ ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
+ info->num_queues += queues;
+ /* Restrict 100G VF to advertise 16 queues till the
+ * required support is available to go beyond 16.
+ */
+ info->num_queues = RTE_MIN(info->num_queues,
+ ECORE_MAX_VF_CHAINS_PER_PF);
+ }
ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
- &info->num_vlan_filters);
+ (u8 *)&info->num_vlan_filters);
ecore_vf_get_port_mac(&edev->hwfns[0],
(uint8_t *)&info->port_mac);
@@ -407,7 +437,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
static void
qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
- const char ver_str[VER_SIZE])
+ const char ver_str[NAME_SIZE])
{
int i;
@@ -415,7 +445,7 @@ qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
for_each_hwfn(edev, i) {
snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
- rte_memcpy(edev->ver_str, ver_str, VER_SIZE);
+ memcpy(edev->ver_str, ver_str, NAME_SIZE);
edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
@@ -485,6 +515,9 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
if_link->duplex = QEDE_DUPLEX_FULL;
+ /* Fill up the native advertised speed cap mask */
+ if_link->adv_speed = params.speed.advertised_speeds;
+
if (params.speed.autoneg)
if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG;
@@ -625,7 +658,9 @@ static int qed_slowpath_stop(struct ecore_dev *edev)
return -ENODEV;
if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_ZIPPED_FW
qed_free_stream_mem(edev);
+#endif
#ifdef CONFIG_QED_SRIOV
if (IS_QED_ETH_IF(edev))
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index b5a40fe4..2e181c8c 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -61,7 +61,23 @@ void qede_rx_queue_release(void *rx_queue)
rte_free(rxq->sw_rx_ring);
rxq->sw_rx_ring = NULL;
rte_free(rxq);
- rx_queue = NULL;
+ rxq = NULL;
+ }
+}
+
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
+{
+ unsigned int i;
+
+ PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+
+ if (txq->sw_tx_ring) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
}
}
@@ -116,15 +132,19 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM;
- if (pkt_len > data_size) {
+ if (pkt_len > data_size && !dev->data->scattered_rx) {
DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
pkt_len, data_size);
rte_free(rxq);
return -EINVAL;
}
+ if (dev->data->scattered_rx)
+ rxq->rx_buf_size = data_size;
+ else
+ rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+
qdev->mtu = pkt_len;
- rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
qdev->mtu, rxq->rx_buf_size);
@@ -166,6 +186,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->sw_rx_ring = NULL;
rte_free(rxq);
rxq = NULL;
+ return -ENOMEM;
}
/* Allocate FW completion ring */
@@ -185,6 +206,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rte_free(rxq->sw_rx_ring);
rxq->sw_rx_ring = NULL;
rte_free(rxq);
+ return -ENOMEM;
}
/* Allocate buffers for the Rx ring */
@@ -198,8 +220,6 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
dev->data->rx_queues[queue_idx] = rxq;
- if (!qdev->rx_queues)
- qdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues;
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
queue_idx, nb_desc, qdev->mtu, socket_id);
@@ -210,22 +230,6 @@ err4:
return -ENOMEM;
}
-static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
-{
- unsigned int i;
-
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
-
- if (txq->sw_tx_ring != NULL) {
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_tx_ring[i].mbuf != NULL) {
- rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
- txq->sw_tx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
@@ -238,7 +242,7 @@ void qede_tx_queue_release(void *tx_queue)
}
rte_free(txq);
}
- tx_queue = NULL;
+ txq = NULL;
}
int
@@ -319,10 +323,6 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
(txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
dev->data->tx_queues[queue_idx] = txq;
- if (!qdev->tx_queues)
- qdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues;
-
- txq->txq_counter = 0;
DP_INFO(edev,
"txq %u num_desc %u tx_free_thresh %u socket %u\n",
@@ -335,32 +335,25 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
static void qede_init_fp(struct qede_dev *qdev)
{
struct qede_fastpath *fp;
- int rss_id, txq_index, tc;
+ uint8_t i, rss_id, tc;
+ int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
- memset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) *
+ memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
sizeof(*qdev->fp_array)));
- memset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) *
+ memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
sizeof(*qdev->sb_array)));
- for_each_rss(rss_id) {
- fp = &qdev->fp_array[rss_id];
-
- fp->qdev = qdev;
- fp->rss_id = rss_id;
-
- /* Point rxq to generic rte queues that was created
- * as part of queue creation.
- */
- fp->rxq = qdev->rx_queues[rss_id];
- fp->sb_info = &qdev->sb_array[rss_id];
-
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq_index = tc * QEDE_RSS_CNT(qdev) + rss_id;
- fp->txqs[tc] = qdev->tx_queues[txq_index];
- fp->txqs[tc]->queue_id = txq_index;
- /* Updating it to main structure */
- snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
- "qdev", rss_id);
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp_rx) {
+ fp->type = QEDE_FASTPATH_RX;
+ fp_rx--;
+ } else {
+ fp->type = QEDE_FASTPATH_TX;
}
+ fp->qdev = qdev;
+ fp->id = i;
+ fp->sb_info = &qdev->sb_array[i];
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
}
qdev->gro_disable = gro_disable;
@@ -386,7 +379,7 @@ int qede_alloc_fp_array(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
int i;
- qdev->fp_array = rte_calloc("fp", QEDE_RSS_CNT(qdev),
+ qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
sizeof(*qdev->fp_array),
RTE_CACHE_LINE_SIZE);
@@ -395,7 +388,7 @@ int qede_alloc_fp_array(struct qede_dev *qdev)
return -ENOMEM;
}
- qdev->sb_array = rte_calloc("sb", QEDE_RSS_CNT(qdev),
+ qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
sizeof(*qdev->sb_array),
RTE_CACHE_LINE_SIZE);
@@ -437,50 +430,52 @@ qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
return 0;
}
-static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
-{
- return qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id);
-}
-
-static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss)
-{
- /* @@@TBD - this should also re-set the qed interrupts */
-}
-
-/* This function allocates all qede memory at NIC load. */
-static int qede_alloc_mem_load(struct qede_dev *qdev)
+int qede_alloc_fp_resc(struct qede_dev *qdev)
{
- int rc = 0, rss_id;
struct ecore_dev *edev = &qdev->edev;
+ struct qede_fastpath *fp;
+ uint32_t num_sbs;
+ int rc, i;
- for (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) {
- struct qede_fastpath *fp = &qdev->fp_array[rss_id];
+ if (IS_VF(edev))
+ ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
+ else
+ num_sbs = (ecore_cxt_get_proto_cid_count
+ (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;
- rc = qede_alloc_mem_fp(qdev, fp);
- if (rc)
- break;
+ if (num_sbs == 0) {
+ DP_ERR(edev, "No status blocks available\n");
+ return -EINVAL;
}
- if (rss_id != QEDE_RSS_CNT(qdev)) {
- /* Failed allocating memory for all the queues */
- if (!rss_id) {
- DP_ERR(edev,
- "Failed to alloc memory for leading queue\n");
- rc = -ENOMEM;
- } else {
- DP_NOTICE(edev, false,
- "Failed to allocate memory for all of "
- "RSS queues\n"
- "Desired: %d queues, allocated: %d queues\n",
- QEDE_RSS_CNT(qdev), rss_id);
- qede_shrink_txq(qdev, rss_id);
+ if (qdev->fp_array)
+ qede_free_fp_arrays(qdev);
+
+ rc = qede_alloc_fp_array(qdev);
+ if (rc != 0)
+ return rc;
+
+ qede_init_fp(qdev);
+
+ for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
+ fp = &qdev->fp_array[i];
+ if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
+ qede_free_fp_arrays(qdev);
+ return -ENOMEM;
}
- qdev->num_rss = rss_id;
}
return 0;
}
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+
+ qede_free_mem_load(eth_dev);
+ qede_free_fp_arrays(qdev);
+}
+
static inline void
qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
@@ -528,9 +523,9 @@ static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
buff[i] = rand();
}
-static int
-qede_config_rss(struct rte_eth_dev *eth_dev,
- struct qed_update_vport_rss_params *rss_params)
+static bool
+qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev,
+ struct qed_update_vport_rss_params *rss_params)
{
struct rte_eth_rss_conf rss_conf;
enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
@@ -541,133 +536,116 @@ qede_config_rss(struct rte_eth_dev *eth_dev,
uint64_t hf;
uint32_t *key;
+ PMD_INIT_FUNC_TRACE(edev);
+
rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
key = (uint32_t *)rss_conf.rss_key;
hf = rss_conf.rss_hf;
- PMD_INIT_FUNC_TRACE(edev);
/* Check if RSS conditions are met.
* Note: Even though its meaningless to enable RSS with one queue, it
* could be used to produce RSS Hash, so skipping that check.
*/
-
if (!(mode & ETH_MQ_RX_RSS)) {
DP_INFO(edev, "RSS flag is not set\n");
- return -EINVAL;
+ return false;
}
- DP_INFO(edev, "RSS flag is set\n");
-
- if (rss_conf.rss_hf == 0)
- DP_NOTICE(edev, false, "RSS hash function = 0, disables RSS\n");
-
- if (rss_conf.rss_key != NULL)
- memcpy(qdev->rss_params.rss_key, rss_conf.rss_key,
- rss_conf.rss_key_len);
+ if (hf == 0) {
+ DP_INFO(edev, "Request to disable RSS\n");
+ return false;
+ }
memset(rss_params, 0, sizeof(*rss_params));
for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
- QEDE_RSS_CNT(qdev));
+ QEDE_RSS_COUNT(qdev));
- /* key and protocols */
- if (rss_conf.rss_key == NULL)
+ if (!key)
qede_prandom_bytes(rss_params->rss_key,
sizeof(rss_params->rss_key));
else
memcpy(rss_params->rss_key, rss_conf.rss_key,
rss_conf.rss_key_len);
- rss_caps = 0;
- rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
- rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0;
- rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0;
- rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
- rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
- rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ qede_init_rss_caps(&rss_caps, hf);
rss_params->rss_caps = rss_caps;
- DP_INFO(edev, "RSS check passes\n");
+ DP_INFO(edev, "RSS conditions are met\n");
- return 0;
+ return true;
}
static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct ecore_queue_start_common_params q_params;
struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
struct qed_dev_info *qed_info = &qdev->dev_info.common;
struct qed_update_vport_params vport_update_params;
- struct qed_start_vport_params start = { 0 };
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ dma_addr_t p_phys_table;
+ int txq_index;
+ uint16_t page_cnt;
int vlan_removal_en = 1;
int rc, tc, i;
- if (!qdev->num_rss) {
- DP_ERR(edev,
- "Cannot update V-VPORT as active as "
- "there are no Rx queues\n");
- return -EINVAL;
- }
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
+ rx_comp_ring);
+ page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
+ rx_comp_ring);
- start.remove_inner_vlan = vlan_removal_en;
- start.gro_enable = !qdev->gro_disable;
- start.mtu = qdev->mtu;
- start.vport_id = 0;
- start.drop_ttl0 = true;
- start.clear_stats = clear_stats;
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
- rc = qdev->ops->vport_start(edev, &start);
- if (rc) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
- return rc;
- }
-
- DP_INFO(edev,
- "Start vport ramrod passed, vport_id = %d,"
- " MTU = %d, vlan_removal_en = %d\n",
- start.vport_id, qdev->mtu, vlan_removal_en);
-
- for_each_rss(i) {
- struct qede_fastpath *fp = &qdev->fp_array[i];
- dma_addr_t p_phys_table;
- uint16_t page_cnt;
-
- p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
- page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
- ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); /* @DPDK */
-
- rc = qdev->ops->q_rx_start(edev, i, i, 0,
- fp->sb_info->igu_sb_id,
- RX_PI,
+ rc = qdev->ops->q_rx_start(edev, i, &q_params,
fp->rxq->rx_buf_size,
fp->rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
page_cnt,
&fp->rxq->hw_rxq_prod_addr);
- if (rc) {
- DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc);
- return rc;
- }
+ if (rc) {
+ DP_ERR(edev, "Start rxq #%d failed %d\n",
+ fp->rxq->queue_id, rc);
+ return rc;
+ }
- fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ fp->rxq->hw_cons_ptr =
+ &fp->sb_info->sb_virt->pi_array[RX_PI];
- qede_update_rx_prod(qdev, fp->rxq);
+ qede_update_rx_prod(qdev, fp->rxq);
+ }
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
- int txq_index = tc * QEDE_RSS_CNT(qdev) + i;
+ txq = fp->txqs[tc];
+ txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
- rc = qdev->ops->q_tx_start(edev, i, txq_index,
- 0,
- fp->sb_info->igu_sb_id,
- TX_PI(tc),
- p_phys_table, page_cnt,
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.queue_id = txq->queue_id;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = qdev->ops->q_tx_start(edev, i, &q_params,
+ p_phys_table,
+ page_cnt, /* **pp_doorbell */
&txq->doorbell_addr);
if (rc) {
DP_ERR(edev, "Start txq %u failed %d\n",
@@ -691,7 +669,9 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
/* Prepare and send the vport enable */
memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = start.vport_id;
+ /* Update MTU via vport update */
+ vport_update_params.mtu = qdev->mtu;
+ vport_update_params.vport_id = 0;
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
@@ -702,14 +682,11 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
vport_update_params.tx_switching_flg = 1;
}
- if (!qede_config_rss(eth_dev, rss_params)) {
+ if (qede_check_vport_rss_enable(eth_dev, rss_params)) {
vport_update_params.update_rss_flg = 1;
-
qdev->rss_enabled = 1;
- DP_INFO(edev, "Updating RSS flag\n");
} else {
qdev->rss_enabled = 0;
- DP_INFO(edev, "Not Updating RSS flag\n");
}
rte_memcpy(&vport_update_params.rss_params, rss_params,
@@ -857,6 +834,59 @@ static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
return RTE_PTYPE_L2_ETHER | p_type;
}
+int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ int num_segs, uint16_t pkt_len)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ struct ecore_dev *edev = &qdev->edev;
+ uint16_t sw_rx_index, cur_size;
+
+ register struct rte_mbuf *seg1 = NULL;
+ register struct rte_mbuf *seg2 = NULL;
+
+ seg1 = rx_mb;
+ while (num_segs) {
+ cur_size = pkt_len > rxq->rx_buf_size ?
+ rxq->rx_buf_size : pkt_len;
+ if (!cur_size) {
+ PMD_RX_LOG(DEBUG, rxq,
+ "SG packet, len and num BD mismatch\n");
+ qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
+ return -EINVAL;
+ }
+
+ if (qede_alloc_rx_buffer(rxq)) {
+ uint8_t index;
+
+ PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
+ index = rxq->port_id;
+ rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ return -ENOMEM;
+ }
+
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ qede_rx_bd_ring_consume(rxq);
+ pkt_len -= cur_size;
+ seg2->data_len = cur_size;
+ seg1->next = seg2;
+ seg1 = seg1->next;
+ num_segs--;
+ rxq->rx_segs++;
+ continue;
+ }
+ seg1 = NULL;
+
+ if (pkt_len)
+ PMD_RX_LOG(DEBUG, rxq,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+
+ return ECORE_SUCCESS;
+}
+
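
A worked example of how the loop above splits a jumbo frame across BDs, assuming 2048-byte RX buffers and a 5000-byte packet (numbers are arbitrary; this is a standalone illustration, not driver code):

#include <stdio.h>

int main(void)
{
        unsigned int rx_buf_size = 2048, pkt_len = 5000;
        /* First BD is consumed by the regular receive path */
        unsigned int first = pkt_len < rx_buf_size ? pkt_len : rx_buf_size;
        unsigned int rem = pkt_len - first;

        printf("first BD: %u bytes\n", first);
        /* qede_process_sg_pkts() walks the remainder in rx_buf_size chunks */
        while (rem) {
                unsigned int cur = rem < rx_buf_size ? rem : rx_buf_size;
                printf("next BD: %u bytes\n", cur);   /* 2048, then 904 */
                rem -= cur;
        }
        return 0;
}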
uint16_t
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
@@ -869,12 +899,12 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union eth_rx_cqe *cqe;
struct eth_fast_path_rx_reg_cqe *fp_cqe;
register struct rte_mbuf *rx_mb = NULL;
+ register struct rte_mbuf *seg1 = NULL;
enum eth_rx_cqe_type cqe_type;
- uint16_t len, pad;
- uint16_t preload_idx;
- uint8_t csum_flag;
- uint16_t parse_flag;
+ uint16_t len, pad, preload_idx, pkt_len, parse_flag;
+ uint8_t csum_flag, num_segs;
enum rss_hash_type htype;
+ int ret;
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -893,7 +923,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
PMD_RX_LOG(DEBUG, rxq, "Got a slowpath CQE\n");
- qdev->ops->eth_cqe_completion(edev, fp->rss_id,
+ qdev->ops->eth_cqe_completion(edev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
goto next_cqe;
}
@@ -944,20 +974,33 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
qede_rx_bd_ring_consume(rxq);
+ if (fp_cqe->bd_num > 1) {
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ num_segs = fp_cqe->bd_num - 1;
+
+ rxq->rx_segs++;
+
+ pkt_len -= len;
+ seg1 = rx_mb;
+ ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
+ pkt_len);
+ if (ret != ECORE_SUCCESS) {
+ qede_recycle_rx_bd_ring(rxq, qdev,
+ fp_cqe->bd_num);
+ goto next_cqe;
+ }
+ }
+
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
- if (fp_cqe->bd_num != 1)
- PMD_RX_LOG(DEBUG, rxq,
- "Jumbo-over-BD packet not supported\n");
-
/* Update MBUF fields */
rx_mb->ol_flags = 0;
rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
- rx_mb->nb_segs = 1;
+ rx_mb->nb_segs = fp_cqe->bd_num;
rx_mb->data_len = len;
- rx_mb->pkt_len = len;
+ rx_mb->pkt_len = fp_cqe->pkt_len;
rx_mb->port = rxq->port_id;
rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
@@ -984,6 +1027,7 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
+ rx_mb->vlan_tci_outer = 0;
}
rx_pkts[rx_pkt] = rx_mb;
@@ -1001,6 +1045,8 @@ next_cqe:
qede_update_rx_prod(qdev, rxq);
+ rxq->rcv_pkts += rx_pkt;
+
PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
return rx_pkt;
@@ -1009,24 +1055,28 @@ next_cqe:
static inline int
qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
- uint16_t idx = TX_CONS(txq);
+ uint16_t nb_segs, idx = TX_CONS(txq);
struct eth_tx_bd *tx_data_bd;
struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
if (unlikely(!mbuf)) {
+ PMD_TX_LOG(ERR, txq, "null mbuf\n");
PMD_TX_LOG(ERR, txq,
- "null mbuf nb_tx_desc %u nb_tx_avail %u "
- "sw_tx_cons %u sw_tx_prod %u\n",
+ "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
txq->nb_tx_desc, txq->nb_tx_avail, idx,
TX_PROD(txq));
return -1;
}
- /* Free now */
- rte_pktmbuf_free_seg(mbuf);
+ nb_segs = mbuf->nb_segs;
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
txq->sw_tx_ring[idx].mbuf = NULL;
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
return 0;
}
@@ -1036,18 +1086,16 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
uint16_t tx_compl = 0;
uint16_t hw_bd_cons;
- int rc;
hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
rte_compiler_barrier();
while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
- rc = qede_free_tx_pkt(edev, txq);
- if (rc) {
- DP_NOTICE(edev, false,
- "hw_bd_cons = %d, chain_cons=%d\n",
- hw_bd_cons,
- ecore_chain_get_cons_idx(&txq->tx_pbl));
+ if (qede_free_tx_pkt(edev, txq)) {
+ PMD_TX_LOG(ERR, txq,
+ "hw_bd_cons = %u, chain_cons = %u\n",
+ hw_bd_cons,
+ ecore_chain_get_cons_idx(&txq->tx_pbl));
break;
}
txq->sw_tx_cons++; /* Making TXD available */
@@ -1059,19 +1107,72 @@ qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
return tx_compl;
}
+/* Populate scatter gather buffer descriptor fields */
+static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
+ struct rte_mbuf *m_seg,
+ uint16_t count,
+ struct eth_tx_1st_bd *bd1)
+{
+ struct qede_tx_queue *txq = p_txq;
+ struct eth_tx_2nd_bd *bd2 = NULL;
+ struct eth_tx_3rd_bd *bd3 = NULL;
+ struct eth_tx_bd *tx_bd = NULL;
+ uint16_t nb_segs = count;
+ dma_addr_t mapping;
+
+ /* Check for scattered buffers */
+ while (m_seg) {
+ if (nb_segs == 1) {
+ bd2 = (struct eth_tx_2nd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(bd2, 0, sizeof(*bd2));
+ mapping = rte_mbuf_data_dma_addr(m_seg);
+ bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ } else if (nb_segs == 2) {
+ bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(bd3, 0, sizeof(*bd3));
+ mapping = rte_mbuf_data_dma_addr(m_seg);
+ bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ } else {
+ tx_bd = (struct eth_tx_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(tx_bd, 0, sizeof(*tx_bd));
+ mapping = rte_mbuf_data_dma_addr(m_seg);
+ tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ }
+ nb_segs++;
+ bd1->data.nbds = nb_segs;
+ m_seg = m_seg->next;
+ }
+
+ /* Return total scattered buffers */
+ return nb_segs;
+}
+
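The nb_segs switch above distributes the mbuf chain across the firmware descriptor types; summarised for reference (informational note, not part of the patch):

/*
 * Segment-to-descriptor mapping implied by qede_encode_sg_bd():
 *   mbuf segment 0  -> struct eth_tx_1st_bd (filled in qede_xmit_pkts())
 *   mbuf segment 1  -> struct eth_tx_2nd_bd
 *   mbuf segment 2  -> struct eth_tx_3rd_bd
 *   mbuf segment 3+ -> generic struct eth_tx_bd entries
 * On return, bd1->data.nbds equals mbuf->nb_segs for the whole packet.
 */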
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct qede_tx_queue *txq = p_txq;
struct qede_dev *qdev = txq->qdev;
struct ecore_dev *edev = &qdev->edev;
- struct qede_fastpath *fp = &qdev->fp_array[txq->queue_id];
- struct eth_tx_1st_bd *first_bd;
+ struct qede_fastpath *fp;
+ struct eth_tx_1st_bd *bd1;
+ struct rte_mbuf *m_seg = NULL;
uint16_t nb_tx_pkts;
uint16_t nb_pkt_sent = 0;
uint16_t bd_prod;
uint16_t idx;
uint16_t tx_count;
+ uint16_t nb_segs = 0;
+
+ fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
@@ -1079,7 +1180,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
(void)qede_process_tx_compl(edev, txq);
}
- nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS));
+ nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
+ ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
if (unlikely(nb_tx_pkts == 0)) {
PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
nb_pkts, txq->nb_tx_avail);
@@ -1091,41 +1193,53 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = TX_PROD(txq);
struct rte_mbuf *mbuf = *tx_pkts++;
+
txq->sw_tx_ring[idx].mbuf = mbuf;
- first_bd = (struct eth_tx_1st_bd *)
- ecore_chain_produce(&txq->tx_pbl);
- first_bd->data.bd_flags.bitfields =
- 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
+ /* Zero init struct fields */
+ bd1->data.bd_flags.bitfields = 0;
+ bd1->data.bitfields = 0;
+
+ bd1->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
/* Map MBUF linear data for DMA and set in the first BD */
- QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf),
- mbuf->data_len);
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->pkt_len);
/* Descriptor based VLAN insertion */
if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
- first_bd->data.bd_flags.bitfields |=
+ bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
/* Offload the IP checksum in the hardware */
if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
- first_bd->data.bd_flags.bitfields |=
+ bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
}
/* L4 checksum offload (tcp or udp) */
if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- first_bd->data.bd_flags.bitfields |=
+ bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
/* IPv6 + extn. -> later */
}
- first_bd->data.nbds = MAX_NUM_TX_BDS;
+
+ /* Handle fragmented MBUF */
+ m_seg = mbuf->next;
+ nb_segs++;
+ bd1->data.nbds = nb_segs;
+ /* Encode scatter gather buffer descriptors if required */
+ nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
+ txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;
+ nb_segs = 0;
txq->sw_tx_prod++;
rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
- txq->nb_tx_avail--;
bd_prod =
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
nb_pkt_sent++;
+ txq->xmit_pkts++;
}
/* Write value of prod idx into bd_prod */
@@ -1144,57 +1258,70 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkt_sent;
}
+static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct qede_fastpath *fp;
+ uint8_t i, rss_id, txq_index, tc;
+ int rxq = 0, txq = 0;
+
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq = eth_dev->data->rx_queues[i];
+ fp->rxq->queue_id = rxq++;
+ }
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
+ fp->txqs[tc] =
+ eth_dev->data->tx_queues[txq_index];
+ fp->txqs[tc]->queue_id = txq_index;
+ }
+ txq++;
+ }
+ }
+}
+
int qede_dev_start(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct qed_link_output link_output;
- int rc;
+ struct qede_fastpath *fp;
+ int rc, i;
- DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
+ DP_INFO(edev, "Device state is %d\n", qdev->state);
- if (qdev->state == QEDE_START) {
- DP_INFO(edev, "device already started\n");
+ if (qdev->state == QEDE_DEV_START) {
+ DP_INFO(edev, "Port is already started\n");
return 0;
}
- if (qdev->state == QEDE_CLOSE) {
- rc = qede_alloc_fp_array(qdev);
- qede_init_fp(qdev);
- rc = qede_alloc_mem_load(qdev);
- DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n",
- QEDE_RSS_CNT(qdev), qdev->num_tc);
- } else if (qdev->state == QEDE_STOP) {
- DP_INFO(edev, "restarting port %u\n", eth_dev->data->port_id);
- } else {
- DP_INFO(edev, "unknown state port %u\n",
- eth_dev->data->port_id);
- return -EINVAL;
- }
+ if (qdev->state == QEDE_DEV_CONFIG)
+ qede_init_fp_queue(eth_dev);
rc = qede_start_queues(eth_dev, true);
-
if (rc) {
DP_ERR(edev, "Failed to start queues\n");
/* TBD: free */
return rc;
}
- DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
-
+ /* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
- /* Query whether link is already-up */
- memset(&link_output, 0, sizeof(link_output));
- qdev->ops->common->get_link(edev, &link_output);
- DP_NOTICE(edev, false, "link status: %s\n",
- link_output.link_up ? "up" : "down");
+ /* Reset ring */
+ if (qede_reset_fp_rings(qdev))
+ return -ENOMEM;
- qdev->state = QEDE_START;
+ /* Start/resume traffic */
+ qdev->ops->fastpath_start(edev);
- qede_config_rx_mode(eth_dev);
+ qdev->state = QEDE_DEV_START;
- DP_INFO(edev, "dev_state is QEDE_START\n");
+ DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
return 0;
}
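The state checks in qede_dev_start() above and qede_dev_stop() below imply a small state machine; sketched here for reference (state names as used in this patch, the enum itself lives elsewhere in the driver):

/*
 * Illustrative state flow (not part of the patch):
 *
 *   QEDE_DEV_CONFIG --qede_dev_start()--> QEDE_DEV_START
 *   QEDE_DEV_START  --qede_dev_stop()---> QEDE_DEV_STOP
 *   QEDE_DEV_STOP   --qede_dev_start()--> QEDE_DEV_START
 *
 * qede_dev_start() is a no-op if the port is already in QEDE_DEV_START,
 * and qede_init_fp_queue() runs only on the first start out of
 * QEDE_DEV_CONFIG.
 */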
@@ -1250,7 +1377,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
vport_update_params.vport_active_flg = 0;
vport_update_params.update_rss_flg = 0;
- DP_INFO(edev, "vport_update\n");
+ DP_INFO(edev, "Deactivate vport\n");
rc = qdev->ops->vport_update(edev, &vport_update_params);
if (rc) {
@@ -1261,129 +1388,153 @@ static int qede_stop_queues(struct qede_dev *qdev)
DP_INFO(edev, "Flushing tx queues\n");
/* Flush Tx queues. If needed, request drain from MCP */
- for_each_rss(i) {
+ for_each_queue(i) {
struct qede_fastpath *fp = &qdev->fp_array[i];
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
- rc = qede_drain_txq(qdev, txq, true);
- if (rc)
- return rc;
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qede_tx_queue *txq = fp->txqs[tc];
+
+ rc = qede_drain_txq(qdev, txq, true);
+ if (rc)
+ return rc;
+ }
}
}
/* Stop all Queues in reverse order */
- for (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) {
+ for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
struct qed_stop_rxq_params rx_params;
/* Stop the Tx Queue(s) */
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
-
- tx_params.rss_id = i;
- tx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i;
-
- DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, &tx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
- return rc;
+ if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct qed_stop_txq_params tx_params;
+ u8 val;
+
+ tx_params.rss_id = i;
+ val = qdev->fp_array[i].txqs[tc]->queue_id;
+ tx_params.tx_queue_id = val;
+
+ DP_INFO(edev, "Stopping tx queues\n");
+ rc = qdev->ops->q_tx_stop(edev, &tx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop TXQ #%d\n",
+ tx_params.tx_queue_id);
+ return rc;
+ }
}
}
/* Stop the Rx Queue */
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = i;
- rx_params.eq_completion_only = 1;
+ if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ memset(&rx_params, 0, sizeof(rx_params));
+ rx_params.rss_id = i;
+ rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
+ rx_params.eq_completion_only = 1;
- DP_INFO(edev, "Stopping rx queues\n");
+ DP_INFO(edev, "Stopping rx queues\n");
- rc = qdev->ops->q_rx_stop(edev, &rx_params);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
+ rc = qdev->ops->q_rx_stop(edev, &rx_params);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
}
}
- DP_INFO(edev, "Stopping vports\n");
-
- /* Stop the vport */
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc)
- DP_ERR(edev, "Failed to stop VPORT\n");
-
- return rc;
+ return 0;
}
-void qede_reset_fp_rings(struct qede_dev *qdev)
+int qede_reset_fp_rings(struct qede_dev *qdev)
{
- uint16_t rss_id;
+ struct qede_fastpath *fp;
+ struct qede_tx_queue *txq;
uint8_t tc;
-
- for_each_rss(rss_id) {
- DP_INFO(&qdev->edev, "reset fp chain for rss %u\n", rss_id);
- struct qede_fastpath *fp = &qdev->fp_array[rss_id];
- ecore_chain_reset(&fp->rxq->rx_bd_ring);
- ecore_chain_reset(&fp->rxq->rx_comp_ring);
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
- ecore_chain_reset(&txq->tx_pbl);
+ uint16_t id, i;
+
+ for_each_queue(id) {
+ fp = &qdev->fp_array[id];
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ DP_INFO(&qdev->edev,
+ "Reset FP chain for RSS %u\n", id);
+ qede_rx_queue_release_mbufs(fp->rxq);
+ ecore_chain_reset(&fp->rxq->rx_bd_ring);
+ ecore_chain_reset(&fp->rxq->rx_comp_ring);
+ fp->rxq->sw_rx_prod = 0;
+ fp->rxq->sw_rx_cons = 0;
+ *fp->rxq->hw_cons_ptr = 0;
+ for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
+ if (qede_alloc_rx_buffer(fp->rxq)) {
+ DP_ERR(&qdev->edev,
+ "RX buffer allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+ }
+ if (fp->type & QEDE_FASTPATH_TX) {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ txq = fp->txqs[tc];
+ qede_tx_queue_release_mbufs(txq);
+ ecore_chain_reset(&txq->tx_pbl);
+ txq->sw_tx_cons = 0;
+ txq->sw_tx_prod = 0;
+ *txq->hw_cons_ptr = 0;
+ }
}
}
+
+ return 0;
}
/* This function frees all memory of a single fp */
-static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp)
+void qede_free_mem_load(struct rte_eth_dev *eth_dev)
{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fastpath *fp;
+ uint16_t txq_idx;
+ uint8_t id;
uint8_t tc;
- qede_rx_queue_release(fp->rxq);
- for (tc = 0; tc < qdev->num_tc; tc++)
- qede_tx_queue_release(fp->txqs[tc]);
-}
-
-void qede_free_mem_load(struct qede_dev *qdev)
-{
- uint8_t rss_id;
-
- for_each_rss(rss_id) {
- struct qede_fastpath *fp = &qdev->fp_array[rss_id];
- qede_free_mem_fp(qdev, fp);
+ for_each_queue(id) {
+ fp = &qdev->fp_array[id];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ qede_rx_queue_release(fp->rxq);
+ eth_dev->data->rx_queues[id] = NULL;
+ } else {
+ for (tc = 0; tc < qdev->num_tc; tc++) {
+ txq_idx = fp->txqs[tc]->queue_id;
+ qede_tx_queue_release(fp->txqs[tc]);
+ eth_dev->data->tx_queues[txq_idx] = NULL;
+ }
+ }
}
- /* qdev->num_rss = 0; */
}
-/*
- * Stop an Ethernet device. The device can be restarted with a call to
- * rte_eth_dev_start().
- * Do not change link state and do not release sw structures.
- */
void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- int rc;
DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
- if (qdev->state != QEDE_START) {
- DP_INFO(edev, "device not yet started\n");
+ if (qdev->state != QEDE_DEV_START) {
+ DP_INFO(edev, "Device not yet started\n");
return;
}
- rc = qede_stop_queues(qdev);
-
- if (rc)
+ if (qede_stop_queues(qdev))
DP_ERR(edev, "Didn't succeed to close queues\n");
DP_INFO(edev, "Stopped queues\n");
qdev->ops->fastpath_stop(edev);
- qede_reset_fp_rings(qdev);
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
- qdev->state = QEDE_STOP;
+ qdev->state = QEDE_DEV_STOP;
- DP_INFO(edev, "dev_state is QEDE_STOP\n");
+ DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 34eaf8b6..ed9a529b 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -30,9 +30,6 @@
#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq))
#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq))
-/* Number of TX BDs per packet used currently */
-#define MAX_NUM_TX_BDS 1
-
#define QEDE_DEFAULT_TX_FREE_THRESH 32
#define QEDE_CSUM_ERROR (1 << 0)
@@ -44,6 +41,10 @@
(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
(bd)->nbytes = rte_cpu_to_le_16(len); \
+ /* FW 8.10.x specific change */ \
+ (bd)->data.bitfields = ((len) & \
+ ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) \
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; \
} while (0)
#define CQE_HAS_VLAN(flags) \
@@ -71,7 +72,7 @@
#define MAX_NUM_TC 8
-#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++)
+#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
/*
* RX BD descriptor ring
@@ -98,6 +99,8 @@ struct qede_rx_queue {
uint16_t queue_id;
uint16_t port_id;
uint16_t rx_buf_size;
+ uint64_t rcv_pkts;
+ uint64_t rx_segs;
uint64_t rx_hw_errors;
uint64_t rx_alloc_errors;
struct qede_dev *qdev;
@@ -129,13 +132,14 @@ struct qede_tx_queue {
void OSAL_IOMEM *doorbell_addr;
volatile union db_prod tx_db;
uint16_t port_id;
- uint64_t txq_counter;
+ uint64_t xmit_pkts;
struct qede_dev *qdev;
};
struct qede_fastpath {
struct qede_dev *qdev;
- uint8_t rss_id;
+ u8 type;
+ uint8_t id;
struct ecore_sb_info *sb_info;
struct qede_rx_queue *rxq;
struct qede_tx_queue *txqs[MAX_NUM_TC];
@@ -164,11 +168,11 @@ int qede_dev_start(struct rte_eth_dev *eth_dev);
void qede_dev_stop(struct rte_eth_dev *eth_dev);
-void qede_reset_fp_rings(struct qede_dev *qdev);
+int qede_reset_fp_rings(struct qede_dev *qdev);
void qede_free_fp_arrays(struct qede_dev *qdev);
-void qede_free_mem_load(struct qede_dev *qdev);
+void qede_free_mem_load(struct rte_eth_dev *eth_dev);
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -176,4 +180,9 @@ uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+/* Fastpath resource alloc/dealloc helpers */
+int qede_alloc_fp_resc(struct qede_dev *qdev);
+
+void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev);
+
#endif /* _QEDE_RXTX_H_ */
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index 5551e187..c1767c48 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -38,7 +38,7 @@
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
@@ -303,7 +303,7 @@ do_eth_dev_ring_create(const char *name,
}
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL) {
rte_errno = ENOSPC;
goto error;
@@ -505,7 +505,7 @@ out:
}
static int
-rte_pmd_ring_devinit(const char *name, const char *params)
+rte_pmd_ring_probe(const char *name, const char *params)
{
struct rte_kvargs *kvlist = NULL;
int ret = 0;
@@ -580,7 +580,7 @@ out_free:
}
static int
-rte_pmd_ring_devuninit(const char *name)
+rte_pmd_ring_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals = NULL;
@@ -599,36 +599,34 @@ rte_pmd_ring_devuninit(const char *name)
eth_dev_stop(eth_dev);
- if (eth_dev->data) {
- internals = eth_dev->data->dev_private;
- if (internals->action == DEV_CREATE) {
- /*
- * it is only necessary to delete the rings in rx_queues because
- * they are the same used in tx_queues
- */
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- r = eth_dev->data->rx_queues[i];
- rte_ring_free(r->rng);
- }
+ internals = eth_dev->data->dev_private;
+ if (internals->action == DEV_CREATE) {
+ /*
+ * it is only necessary to delete the rings in rx_queues because
+ * they are the same used in tx_queues
+ */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ r = eth_dev->data->rx_queues[i];
+ rte_ring_free(r->rng);
}
-
- rte_free(eth_dev->data->rx_queues);
- rte_free(eth_dev->data->tx_queues);
- rte_free(eth_dev->data->dev_private);
}
+ rte_free(eth_dev->data->rx_queues);
+ rte_free(eth_dev->data->tx_queues);
+ rte_free(eth_dev->data->dev_private);
+
rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
return 0;
}
-static struct rte_driver pmd_ring_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_ring_devinit,
- .uninit = rte_pmd_ring_devuninit,
+static struct rte_vdev_driver pmd_ring_drv = {
+ .probe = rte_pmd_ring_probe,
+ .remove = rte_pmd_ring_remove,
};
-PMD_REGISTER_DRIVER(pmd_ring_drv, eth_ring);
-DRIVER_REGISTER_PARAM_STRING(eth_ring,
- "nodeaction=[attach|detach]");
+RTE_PMD_REGISTER_VDEV(net_ring, pmd_ring_drv);
+RTE_PMD_REGISTER_ALIAS(net_ring, eth_ring);
+RTE_PMD_REGISTER_PARAM_STRING(net_ring,
+ ETH_RING_NUMA_NODE_ACTION_ARG "=name:node:action(ATTACH|CREATE)");
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index 483d7894..f3cd52dc 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -62,7 +62,7 @@
*/
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
-#define RTE_SZEDATA2_DRIVER_NAME rte_szedata2_pmd
+#define RTE_SZEDATA2_DRIVER_NAME net_szedata2
#define RTE_SZEDATA2_PCI_DRIVER_NAME "rte_szedata2_pmd"
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
@@ -1416,7 +1416,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
int ret;
uint32_t szedata2_index;
struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;
- struct rte_pci_resource *pci_rsc =
+ struct rte_mem_resource *pci_rsc =
&dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
char rsc_filename[PATH_MAX];
void *pci_resource_ptr = NULL;
@@ -1473,7 +1473,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
rte_eth_copy_pci_info(dev, dev->pci_dev);
- /* mmap pci resource0 file to rte_pci_resource structure */
+ /* mmap pci resource0 file to rte_mem_resource structure */
if (dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
0) {
RTE_LOG(ERR, PMD, "Missing resource%u file\n",
@@ -1572,33 +1572,14 @@ static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
static struct eth_driver szedata2_eth_driver = {
.pci_drv = {
- .name = RTE_SZEDATA2_PCI_DRIVER_NAME,
.id_table = rte_szedata2_pci_id_table,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = rte_szedata2_eth_dev_init,
.eth_dev_uninit = rte_szedata2_eth_dev_uninit,
.dev_private_size = sizeof(struct pmd_internals),
};
-static int
-rte_szedata2_init(const char *name __rte_unused,
- const char *args __rte_unused)
-{
- rte_eth_driver_register(&szedata2_eth_driver);
- return 0;
-}
-
-static int
-rte_szedata2_uninit(const char *name __rte_unused)
-{
- return 0;
-}
-
-static struct rte_driver rte_szedata2_driver = {
- .type = PMD_PDEV,
- .init = rte_szedata2_init,
- .uninit = rte_szedata2_uninit,
-};
-
-PMD_REGISTER_DRIVER(rte_szedata2_driver, RTE_SZEDATA2_DRIVER_NAME);
-DRIVER_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
+RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index 8ea6b454..bcab5f93 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -57,6 +57,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_hw.c
SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_mbox.c
SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_bsvf.c
+SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_svf.c
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
new file mode 100644
index 00000000..9e028a3a
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -0,0 +1,72 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+#include <err.h>
+
+#include "nicvf_bsvf.h"
+#include "nicvf_plat.h"
+
+static SIMPLEQ_HEAD(, svf_entry) head = SIMPLEQ_HEAD_INITIALIZER(head);
+
+void
+nicvf_bsvf_push(struct svf_entry *entry)
+{
+ assert(entry != NULL);
+ assert(entry->vf != NULL);
+
+ SIMPLEQ_INSERT_TAIL(&head, entry, next);
+}
+
+struct svf_entry *
+nicvf_bsvf_pop(void)
+{
+ struct svf_entry *entry;
+
+ assert(!SIMPLEQ_EMPTY(&head));
+
+ entry = SIMPLEQ_FIRST(&head);
+
+ assert(entry != NULL);
+ assert(entry->vf != NULL);
+
+ SIMPLEQ_REMOVE_HEAD(&head, next);
+
+ return entry;
+}
+
+int
+nicvf_bsvf_empty(void)
+{
+ return SIMPLEQ_EMPTY(&head);
+}
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
new file mode 100644
index 00000000..5d5a25e2
--- /dev/null
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -0,0 +1,76 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_BSVF_H__
+#define __THUNDERX_NICVF_BSVF_H__
+
+#include <sys/queue.h>
+
+struct nicvf;
+
+/**
+ * The base queue structure to hold secondary qsets.
+ */
+struct svf_entry {
+ SIMPLEQ_ENTRY(svf_entry) next; /**< Next element's pointer */
+ struct nicvf *vf; /**< Holder of a secondary qset */
+};
+
+/**
+ * Enqueue new entry to secondary qsets.
+ *
+ * @param entry
+ * Entry to be enqueued.
+ */
+void
+nicvf_bsvf_push(struct svf_entry *entry);
+
+/**
+ * Dequeue an entry from secondary qsets.
+ *
+ * @return
+ * Dequeued entry.
+ */
+struct svf_entry *
+nicvf_bsvf_pop(void);
+
+/**
+ * Check if the queue of secondary qsets is empty.
+ *
+ * @return
+ * 0 if the queue is non-empty
+ * non-zero if the queue is empty
+ */
+int
+nicvf_bsvf_empty(void);
+
+#endif /* __THUNDERX_NICVF_BSVF_H__ */
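A hedged sketch of how a primary VF could drain this queue to collect its secondary qsets (hypothetical caller, not part of this patch; it assumes the full struct nicvf definition with snicvf[] and sqs_count fields from nicvf_struct.h — the real wiring lives in nicvf_svf.c and nicvf_ethdev.c):

#include <stddef.h>
#include "nicvf_bsvf.h"
#include "../nicvf_struct.h"	/* assumed: full struct nicvf definition */

/* Hypothetical helper: move every queued secondary qset into the
 * primary VF's snicvf[] array in FIFO order and record the count.
 */
static void example_collect_secondary_qsets(struct nicvf *primary)
{
	size_t i = 0;

	while (!nicvf_bsvf_empty()) {
		struct svf_entry *entry = nicvf_bsvf_pop();

		primary->snicvf[i++] = entry->vf;	/* assumed field */
	}
	primary->sqs_count = i;				/* assumed field */
}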
diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c
index 001b0edd..04b3b69c 100644
--- a/drivers/net/thunderx/base/nicvf_hw.c
+++ b/drivers/net/thunderx/base/nicvf_hw.c
@@ -140,8 +140,15 @@ nicvf_base_init(struct nicvf *nic)
if (nic->subsystem_device_id == 0)
return NICVF_ERR_BASE_INIT;
- if (nicvf_hw_version(nic) == NICVF_PASS2)
- nic->hwcap |= NICVF_CAP_TUNNEL_PARSING;
+ if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
+
+ if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;
+
+ if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
+ nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
+ NICVF_CAP_DISABLE_APAD;
return NICVF_OK;
}
@@ -494,9 +501,9 @@ nicvf_qsize_rbdr_roundup(uint32_t val)
}
int
-nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
- rbdr_pool_get_handler handler,
- void *opaque, uint32_t max_buffs)
+nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
+ uint16_t ridx, rbdr_pool_get_handler handler,
+ uint32_t max_buffs)
{
struct rbdr_entry_t *desc, *desc0;
struct nicvf_rbdr *rbdr = nic->rbdr;
@@ -511,7 +518,7 @@ nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
if (count >= max_buffs)
break;
desc0 = desc + count;
- phy = handler(opaque);
+ phy = handler(dev, nic);
if (phy) {
desc0->full_addr = phy;
count++;
@@ -722,6 +729,24 @@ nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
}
void
+nicvf_apad_config(struct nicvf *nic, bool enable)
+{
+ uint64_t val;
+
+ /* APAD always enabled in this device */
+ if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
+ return;
+
+ val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
+ if (enable)
+ val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
+ else
+ val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
+
+ nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
+}
+
+void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
int idx;
diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h
index 9db1d30c..14fb2feb 100644
--- a/drivers/net/thunderx/base/nicvf_hw.h
+++ b/drivers/net/thunderx/base/nicvf_hw.h
@@ -37,11 +37,13 @@
#include "nicvf_hw_defs.h"
-#define PCI_VENDOR_ID_CAVIUM 0x177D
-#define PCI_DEVICE_ID_THUNDERX_PASS1_NICVF 0x0011
-#define PCI_DEVICE_ID_THUNDERX_PASS2_NICVF 0xA034
-#define PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF 0xA11E
-#define PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF 0xA134
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF 0x0011
+#define PCI_DEVICE_ID_THUNDERX_NICVF 0xA034
+#define PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF 0xA11E
+#define PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF 0xA134
+#define PCI_SUB_DEVICE_ID_CN81XX_NICVF 0xA234
+#define PCI_SUB_DEVICE_ID_CN83XX_NICVF 0xA334
#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -50,10 +52,11 @@
#define NICVF_GET_TX_STATS(reg) \
nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
-#define NICVF_PASS1 (PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF)
-#define NICVF_PASS2 (PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF)
-
-#define NICVF_CAP_TUNNEL_PARSING (1ULL << 0)
+#define NICVF_CAP_TUNNEL_PARSING (1ULL << 0)
+/* Additional word in Rx descriptor to hold optional tunneling extension info */
+#define NICVF_CAP_CQE_RX2 (1ULL << 1)
+/* The device capable of setting NIC_CQE_RX_S[APAD] == 0 */
+#define NICVF_CAP_DISABLE_APAD (1ULL << 2)
enum nicvf_tns_mode {
NIC_TNS_BYPASS_MODE,
@@ -85,7 +88,7 @@ enum nicvf_err_e {
NICVF_ERR_RSS_GET_SZ, /* -8171 */
};
-typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque);
+typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *dev, void *opaque);
struct nicvf_hw_rx_qstats {
uint64_t q_rx_bytes;
@@ -194,8 +197,8 @@ int nicvf_qset_reclaim(struct nicvf *nic);
int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx);
int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx);
-int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx,
- rbdr_pool_get_handler handler, void *opaque,
+int nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
+ uint16_t ridx, rbdr_pool_get_handler handler,
uint32_t max_buffs);
int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx);
@@ -217,6 +220,8 @@ uint32_t nicvf_qsize_sq_roundup(uint32_t val);
void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable);
+void nicvf_apad_config(struct nicvf *nic, bool enable);
+
int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg);
int nicvf_rss_term(struct nicvf *nic);
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 2f2b2259..00dd2feb 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -105,6 +105,8 @@
#define NICVF_INTR_MBOX_SHIFT 22
#define NICVF_INTR_QS_ERR_SHIFT 23
+#define NICVF_QS_RQ_DIS_APAD_SHIFT 22
+
#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
@@ -207,6 +209,7 @@
#define NICVF_CQE_RX2_RBPTR_WORD (7)
#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s)
+#define assert_primary(nic) assert((nic)->sqs_mode == 0)
typedef uint64_t nicvf_phys_addr_t;
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index 9c5cd834..3b7b8a51 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -62,6 +62,9 @@ static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
[NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
[NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
+ [NIC_MBOX_MSG_RES_BIT] = "NIC_MBOX_MSG_RES_BIT",
+ [NIC_MBOX_MSG_RSS_SIZE_RES_BIT] = "NIC_MBOX_MSG_RSS_SIZE",
+ [NIC_MBOX_MSG_ALLOC_SQS_RES_BIT] = "NIC_MBOX_MSG_ALLOC_SQS",
};
static inline const char * __attribute__((unused))
@@ -173,7 +176,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_NACK:
nic->pf_nacked = true;
break;
- case NIC_MBOX_MSG_RSS_SIZE:
+ case NIC_MBOX_MSG_RSS_SIZE_RES_BIT:
nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
nic->pf_acked = true;
break;
@@ -183,6 +186,26 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
nic->speed = mbx.link_status.speed;
nic->pf_acked = true;
break;
+ case NIC_MBOX_MSG_ALLOC_SQS_RES_BIT:
+ assert_primary(nic);
+ if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
+ nicvf_log_error("Received %" PRIu8 "/%" PRIu8
+ " secondary qsets",
+ mbx.sqs_alloc.qs_count,
+ nic->sqs_count);
+ abort();
+ }
+ for (i = 0; i < mbx.sqs_alloc.qs_count; i++) {
+ if (mbx.sqs_alloc.svf[i] != nic->snicvf[i]->vf_id) {
+ nicvf_log_error("Received secondary qset[%zu] "
+ "ID %" PRIu8 " expected %"
+ PRIu8, i, mbx.sqs_alloc.svf[i],
+ nic->snicvf[i]->vf_id);
+ abort();
+ }
+ }
+ nic->pf_acked = true;
+ break;
default:
nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s",
mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg));
@@ -314,11 +337,33 @@ nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
/* Send a mailbox msg to PF to config Qset */
mbx.msg.msg = NIC_MBOX_MSG_QS_CFG;
mbx.qs.num = nic->vf_id;
+ mbx.qs.sqs_count = nic->sqs_count;
mbx.qs.cfg = qs_cfg->value;
return nicvf_mbox_send_msg_to_pf(nic, &mbx);
}
int
+nicvf_mbox_request_sqs(struct nicvf *nic)
+{
+ struct nic_mbx mbx = { .msg = { 0 } };
+ size_t i;
+
+ assert_primary(nic);
+ assert(nic->sqs_count > 0);
+ assert(nic->sqs_count <= MAX_SQS_PER_VF);
+
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.spec = 1;
+ mbx.sqs_alloc.qs_count = nic->sqs_count;
+
+ /* Record the VF id of each secondary Qset */
+ for (i = 0; i < nic->sqs_count; i++)
+ mbx.sqs_alloc.svf[i] = nic->snicvf[i]->vf_id;
+
+ return nicvf_mbox_send_msg_to_pf(nic, &mbx);
+}
+
+int
nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable)
{
struct nic_mbx mbx = { .msg = { 0 } };
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 7c0c6a97..084f3a76 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -36,6 +36,7 @@
#include <stdint.h>
#include "nicvf_plat.h"
+#include "../nicvf_struct.h"
/* PF <--> VF Mailbox communication
* Two 64bit registers are shared between PF and VF for each VF
@@ -67,10 +68,16 @@
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
-#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
+#define NIC_MBOX_MSG_CFG_DONE 0x7E /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0x7F /* VF is being shutdown */
+#define NIC_MBOX_MSG_RES_BIT 0x80 /* Reset bit from PF */
#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */
+#define NIC_MBOX_MSG_RSS_SIZE_RES_BIT \
+ (NIC_MBOX_MSG_RSS_SIZE | NIC_MBOX_MSG_RES_BIT)
+#define NIC_MBOX_MSG_ALLOC_SQS_RES_BIT \
+ (NIC_MBOX_MSG_ALLOC_SQS | NIC_MBOX_MSG_RES_BIT)
+
/* Get vNIC VF configuration */
struct nic_cfg_msg {
uint8_t msg;
@@ -155,6 +162,14 @@ struct bgx_link_status {
uint32_t speed;
};
+/* Allocate additional SQS to VF */
+struct sqs_alloc {
+ uint8_t msg;
+ uint8_t spec;
+ uint8_t qs_count;
+ uint8_t svf[MAX_SQS_PER_VF];
+};
+
/* Set interface in loopback mode */
struct set_loopback {
uint8_t msg;
@@ -201,6 +216,7 @@ union {
struct rss_sz_msg rss_size;
struct rss_cfg_msg rss_cfg;
struct bgx_link_status link_status;
+ struct sqs_alloc sqs_alloc;
struct set_loopback lbk;
struct reset_stat_cfg reset_stat;
};
@@ -211,6 +227,7 @@ NICVF_STATIC_ASSERT(sizeof(struct nic_mbx) <= 16);
int nicvf_handle_mbx_intr(struct nicvf *nic);
int nicvf_mbox_check_pf_ready(struct nicvf *nic);
int nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg);
+int nicvf_mbox_request_sqs(struct nicvf *nic);
int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx,
struct pf_rq_cfg *pf_rq_cfg);
int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx);
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 4f875c02..466e49ce 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -67,9 +67,13 @@
#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
+#include "nicvf_svf.h"
#include "nicvf_logs.h"
static void nicvf_dev_stop(struct rte_eth_dev *dev);
+static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
+static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
+ bool cleanup);
static inline int
nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
@@ -101,31 +105,40 @@ nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
static void
nicvf_interrupt(void *arg)
{
- struct nicvf *nic = arg;
+ struct rte_eth_dev *dev = arg;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
- if (nic->eth_dev->data->dev_conf.intr_conf.lsc)
- nicvf_set_eth_link_status(nic,
- &nic->eth_dev->data->dev_link);
- _rte_eth_dev_callback_process(nic->eth_dev,
- RTE_ETH_EVENT_INTR_LSC);
+ if (dev->data->dev_conf.intr_conf.lsc)
+ nicvf_set_eth_link_status(nic, &dev->data->dev_link);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
- nicvf_interrupt, nic);
+ nicvf_interrupt, dev);
+}
+
+static void
+nicvf_vf_interrupt(void *arg)
+{
+ struct nicvf *nic = arg;
+
+ nicvf_reg_poll_interrupts(nic);
+
+ rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
+ nicvf_vf_interrupt, nic);
}
static int
-nicvf_periodic_alarm_start(struct nicvf *nic)
+nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
{
- return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
- nicvf_interrupt, nic);
+ return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
}
static int
-nicvf_periodic_alarm_stop(struct nicvf *nic)
+nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
{
- return rte_eal_alarm_cancel(nicvf_interrupt, nic);
+ return rte_eal_alarm_cancel(fn, arg);
}
/*
@@ -150,6 +163,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ size_t i;
PMD_INIT_FUNC_TRACE();
@@ -185,6 +199,10 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* Update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
nic->mtu = mtu;
+
+ for (i = 0; i < nic->sqs_count; i++)
+ nic->snicvf[i]->mtu = mtu;
+
return 0;
}
@@ -218,9 +236,15 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
struct nicvf_hw_tx_qstats tx_qstats;
struct nicvf_hw_stats port_stats;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t rx_start, rx_end;
+ uint16_t tx_start, tx_end;
+ size_t i;
+
+ /* RX queue indices for the first VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
/* Reading per RX ring stats */
- for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
@@ -229,8 +253,11 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
}
+ /* TX queue indices for the first VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
/* Reading per TX ring stats */
- for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) {
+ for (qidx = tx_start; qidx <= tx_end; qidx++) {
if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
@@ -239,6 +266,40 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
}
+ for (i = 0; i < nic->sqs_count; i++) {
+ struct nicvf *snic = nic->snicvf[i];
+
+ if (snic == NULL)
+ break;
+
+ /* RX queue indices for a secondary VF */
+ nicvf_rx_range(dev, snic, &rx_start, &rx_end);
+
+ /* Reading per RX ring stats */
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_rx_qstats(snic, &rx_qstats,
+ qidx % MAX_RCV_QUEUES_PER_QS);
+ stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
+ stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
+ }
+
+ /* TX queue indices for a secondary VF */
+ nicvf_tx_range(dev, snic, &tx_start, &tx_end);
+ /* Reading per TX ring stats */
+ for (qidx = tx_start; qidx <= tx_end; qidx++) {
+ if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ break;
+
+ nicvf_hw_get_tx_qstats(snic, &tx_qstats,
+ qidx % MAX_SND_QUEUES_PER_QS);
+ stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
+ stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
+ }
+ }
+
nicvf_hw_get_stats(nic, &port_stats);
stats->ibytes = port_stats.rx_bytes;
stats->ipackets = port_stats.rx_ucast_frames;
@@ -265,7 +326,7 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
size_t copied;
static uint32_t ptypes[32];
struct nicvf *nic = nicvf_pmd_priv(dev);
- static const uint32_t ptypes_pass1[] = {
+ static const uint32_t ptypes_common[] = {
RTE_PTYPE_L3_IPV4,
RTE_PTYPE_L3_IPV4_EXT,
RTE_PTYPE_L3_IPV6,
@@ -274,7 +335,7 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_UDP,
RTE_PTYPE_L4_FRAG,
};
- static const uint32_t ptypes_pass2[] = {
+ static const uint32_t ptypes_tunnel[] = {
RTE_PTYPE_TUNNEL_GRE,
RTE_PTYPE_TUNNEL_GENEVE,
RTE_PTYPE_TUNNEL_VXLAN,
@@ -282,12 +343,12 @@ nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
};
static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
- copied = sizeof(ptypes_pass1);
- memcpy(ptypes, ptypes_pass1, copied);
- if (nicvf_hw_version(nic) == NICVF_PASS2) {
- memcpy((char *)ptypes + copied, ptypes_pass2,
- sizeof(ptypes_pass2));
- copied += sizeof(ptypes_pass2);
+ copied = sizeof(ptypes_common);
+ memcpy(ptypes, ptypes_common, copied);
+ if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
+ memcpy((char *)ptypes + copied, ptypes_tunnel,
+ sizeof(ptypes_tunnel));
+ copied += sizeof(ptypes_tunnel);
}
memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
@@ -304,13 +365,36 @@ nicvf_dev_stats_reset(struct rte_eth_dev *dev)
int i;
uint16_t rxqs = 0, txqs = 0;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint16_t rx_start, rx_end;
+ uint16_t tx_start, tx_end;
- for (i = 0; i < dev->data->nb_rx_queues; i++)
+ /* Reset all primary nic counters */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+ for (i = rx_start; i <= rx_end; i++)
rxqs |= (0x3 << (i * 2));
- for (i = 0; i < dev->data->nb_tx_queues; i++)
+
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+ for (i = tx_start; i <= tx_end; i++)
txqs |= (0x3 << (i * 2));
nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
+
+ /* Reset secondary nic queue counters */
+ for (i = 0; i < nic->sqs_count; i++) {
+ struct nicvf *snic = nic->snicvf[i];
+ if (snic == NULL)
+ break;
+
+ nicvf_rx_range(dev, snic, &rx_start, &rx_end);
+ for (i = rx_start; i <= rx_end; i++)
+ rxqs |= (0x3 << ((i % MAX_CMP_QUEUES_PER_QS) * 2));
+
+ nicvf_tx_range(dev, snic, &tx_start, &tx_end);
+ for (i = tx_start; i <= tx_end; i++)
+ txqs |= (0x3 << ((i % MAX_SND_QUEUES_PER_QS) * 2));
+
+ nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
+ }
}
/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
@@ -488,14 +572,15 @@ nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
}
static int
-nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
- uint32_t desc_cnt)
+nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
{
const struct rte_memzone *rz;
uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
- rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size,
- NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
+ rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
+ nicvf_netdev_qidx(nic, qidx), ring_size,
+ NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
if (rz == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
return -ENOMEM;
@@ -511,14 +596,15 @@ nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx,
}
static int
-nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
- uint32_t desc_cnt)
+nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
{
const struct rte_memzone *rz;
uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
- rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size,
- NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
+ rz = rte_eth_dma_zone_reserve(dev, "sq",
+ nicvf_netdev_qidx(nic, qidx), ring_size,
+ NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
if (rz == NULL) {
PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring");
return -ENOMEM;
@@ -534,7 +620,8 @@ nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx,
}
static int
-nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz)
+nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint32_t desc_cnt, uint32_t buffsz)
{
struct nicvf_rbdr *rbdr;
const struct rte_memzone *rz;
@@ -549,8 +636,9 @@ nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz)
}
ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
- rz = rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size,
- NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
+ rz = rte_eth_dma_zone_reserve(dev, "rbdr",
+ nicvf_netdev_qidx(nic, 0), ring_size,
+ NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
if (rz == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
return -ENOMEM;
@@ -574,14 +662,19 @@ nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz)
}
static void
-nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy)
+nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
+ nicvf_phys_addr_t phy)
{
uint16_t qidx;
void *obj;
struct nicvf_rxq *rxq;
+ uint16_t rx_start, rx_end;
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
- rxq = nic->eth_dev->data->rx_queues[qidx];
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
+
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
if (rxq->precharge_cnt) {
obj = (void *)nicvf_mbuff_phy2virt(phy,
rxq->mbuf_phys_off);
@@ -593,7 +686,7 @@ nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy)
}
static inline void
-nicvf_rbdr_release_mbufs(struct nicvf *nic)
+nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
{
uint32_t qlen_mask, head;
struct rbdr_entry_t *entry;
@@ -603,7 +696,7 @@ nicvf_rbdr_release_mbufs(struct nicvf *nic)
head = rbdr->head;
while (head != rbdr->tail) {
entry = rbdr->desc + head;
- nicvf_rbdr_release_mbuf(nic, entry->full_addr);
+ nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
head++;
head = head & qlen_mask;
}
@@ -638,48 +731,60 @@ nicvf_tx_queue_reset(struct nicvf_txq *txq)
}
static inline int
-nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
{
struct nicvf_txq *txq;
int ret;
- if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ assert(qidx < MAX_SND_QUEUES_PER_QS);
+
+ if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
return 0;
- txq = dev->data->tx_queues[qidx];
+ txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
txq->pool = NULL;
- ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq);
+ ret = nicvf_qset_sq_config(nic, qidx, txq);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret);
+ PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
+ nic->vf_id, qidx, ret);
goto config_sq_error;
}
- dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STARTED;
return ret;
config_sq_error:
- nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+ nicvf_qset_sq_reclaim(nic, qidx);
return ret;
}
static inline int
-nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
{
struct nicvf_txq *txq;
int ret;
- if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ assert(qidx < MAX_SND_QUEUES_PER_QS);
+
+ if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
return 0;
- ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx);
+ ret = nicvf_qset_sq_reclaim(nic, qidx);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret);
+ PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
+ nic->vf_id, qidx, ret);
- txq = dev->data->tx_queues[qidx];
+ txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
nicvf_tx_queue_release_mbufs(txq);
nicvf_tx_queue_reset(txq);
- dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
return ret;
}
@@ -691,7 +796,7 @@ nicvf_configure_cpi(struct rte_eth_dev *dev)
int ret;
/* Count started rx queues */
- for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
+ for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
if (dev->data->rx_queue_state[qidx] ==
RTE_ETH_QUEUE_STATE_STARTED)
qcnt++;
@@ -715,14 +820,13 @@ nicvf_configure_rss(struct rte_eth_dev *dev)
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
dev->data->dev_conf.rxmode.mq_mode,
- nic->eth_dev->data->nb_rx_queues,
- nic->eth_dev->data->dev_conf.lpbk_mode, rsshf);
+ dev->data->nb_rx_queues,
+ dev->data->dev_conf.lpbk_mode, rsshf);
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
ret = nicvf_rss_term(nic);
else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
- ret = nicvf_rss_config(nic,
- nic->eth_dev->data->nb_rx_queues, rsshf);
+ ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
if (ret)
PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
@@ -827,6 +931,11 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_INIT_FUNC_TRACE();
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
/* Socket id check */
if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
@@ -861,18 +970,20 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
}
/* Free memory prior to re-allocation if needed. */
- if (dev->data->tx_queues[qidx] != NULL) {
+ if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
- qidx);
- nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]);
- dev->data->tx_queues[qidx] = NULL;
+ nicvf_netdev_qidx(nic, qidx));
+ nicvf_dev_tx_queue_release(
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
}
/* Allocating tx queue data structure */
txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
RTE_CACHE_LINE_SIZE, nic->node);
if (txq == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx);
+ PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
+ nicvf_netdev_qidx(nic, qidx));
return -ENOMEM;
}
@@ -906,7 +1017,7 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
return -ENOMEM;
}
- if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) {
+ if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
nicvf_dev_tx_queue_release(txq);
return -ENOMEM;
@@ -915,26 +1026,28 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_tx_queue_reset(txq);
PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
- qidx, txq, nb_desc, txq->desc, txq->phys);
+ nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
+ txq->phys);
- dev->data->tx_queues[qidx] = txq;
- dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
+ dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
static inline void
-nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
+nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
{
uint32_t rxq_cnt;
uint32_t nb_pkts, released_pkts = 0;
uint32_t refill_cnt = 0;
- struct rte_eth_dev *dev = rxq->nic->eth_dev;
struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
if (dev->rx_pkt_burst == NULL)
return;
- while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) {
+ while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
+ nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
NICVF_MAX_RX_FREE_THRESH);
PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt);
@@ -944,7 +1057,10 @@ nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq)
}
}
- refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id);
+
+ refill_cnt += nicvf_dev_rbdr_refill(dev,
+ nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
+
PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d",
released_pkts, refill_cnt);
}
@@ -958,31 +1074,37 @@ nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
}
static inline int
-nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
{
- struct nicvf *nic = nicvf_pmd_priv(dev);
struct nicvf_rxq *rxq;
int ret;
- if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ assert(qidx < MAX_RCV_QUEUES_PER_QS);
+
+ if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STARTED)
return 0;
/* Update rbdr pointer to all rxq */
- rxq = dev->data->rx_queues[qidx];
+ rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
rxq->shared_rbdr = nic->rbdr;
ret = nicvf_qset_rq_config(nic, qidx, rxq);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret);
+ PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
+ nic->vf_id, qidx, ret);
goto config_rq_error;
}
ret = nicvf_qset_cq_config(nic, qidx, rxq);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret);
+ PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
+ nic->vf_id, qidx, ret);
goto config_cq_error;
}
- dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STARTED;
return 0;
config_cq_error:
@@ -993,50 +1115,57 @@ config_rq_error:
}
static inline int
-nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx)
+nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
+ uint16_t qidx)
{
- struct nicvf *nic = nicvf_pmd_priv(dev);
struct nicvf_rxq *rxq;
int ret, other_error;
- if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
+ RTE_ETH_QUEUE_STATE_STOPPED)
return 0;
ret = nicvf_qset_rq_reclaim(nic, qidx);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret);
+ PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
+ nic->vf_id, qidx, ret);
other_error = ret;
- rxq = dev->data->rx_queues[qidx];
- nicvf_rx_queue_release_mbufs(rxq);
+ rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
+ nicvf_rx_queue_release_mbufs(dev, rxq);
nicvf_rx_queue_reset(rxq);
ret = nicvf_qset_cq_reclaim(nic, qidx);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret);
+ PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
+ nic->vf_id, qidx, ret);
other_error |= ret;
- dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
return other_error;
}
static void
nicvf_dev_rx_queue_release(void *rx_queue)
{
- struct nicvf_rxq *rxq = rx_queue;
-
PMD_INIT_FUNC_TRACE();
- if (rxq)
- rte_free(rxq);
+ rte_free(rx_queue);
}
static int
nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
+ struct nicvf *nic = nicvf_pmd_priv(dev);
int ret;
- ret = nicvf_start_rx_queue(dev, qidx);
+ if (qidx >= MAX_RCV_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
+ ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
if (ret)
return ret;
@@ -1051,8 +1180,14 @@ static int
nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
int ret;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
- ret = nicvf_stop_rx_queue(dev, qidx);
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
+ ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
ret |= nicvf_configure_cpi(dev);
ret |= nicvf_configure_rss_reta(dev);
return ret;
@@ -1061,15 +1196,30 @@ nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
static int
nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
- return nicvf_start_tx_queue(dev, qidx);
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
+ return nicvf_vf_start_tx_queue(dev, nic, qidx);
}
static int
nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
- return nicvf_stop_tx_queue(dev, qidx);
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+
+ if (qidx >= MAX_SND_QUEUES_PER_QS)
+ nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
+
+ qidx = qidx % MAX_SND_QUEUES_PER_QS;
+
+ return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}
+
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
uint16_t nb_desc, unsigned int socket_id,
@@ -1082,14 +1232,25 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
PMD_INIT_FUNC_TRACE();
+ if (qidx >= MAX_RCV_QUEUES_PER_QS)
+ nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
+
+ qidx = qidx % MAX_RCV_QUEUES_PER_QS;
+
/* Socket id check */
if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
socket_id, nic->node);
- /* Mempool memory should be contiguous */
+	/* Mempool memory must be contiguous, i.e. a single memory segment */
if (mp->nb_mem_chunks != 1) {
- PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz");
+ PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
+ return -EINVAL;
+ }
+
+ /* Mempool memory must be physically contiguous */
+ if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
+ PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
return -EINVAL;
}
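The two checks above require the Rx mempool to be backed by a single, physically contiguous memory chunk. A minimal sketch of creating a compatible pool follows; it is illustrative only (pool name, mbuf count and cache size are arbitrary) and assumes huge pages large enough for the pool to land in one memory chunk. The 2048-byte usable buffer size also satisfies the multiple-of-128 check made later in nicvf_dev_start().

	/* Illustrative sketch, not part of this patch */
	struct rte_mempool *mp;

	mp = rte_pktmbuf_pool_create("nicvf_rx_pool",
			8192,				/* mbuf count (arbitrary) */
			256,				/* per-lcore cache size */
			0,				/* private area size */
			RTE_PKTMBUF_HEADROOM + 2048,	/* data room size */
			rte_socket_id());
	if (mp == NULL || mp->nb_mem_chunks != 1)
		rte_exit(EXIT_FAILURE, "cannot create a contiguous mbuf pool\n");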
@@ -1118,18 +1279,20 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
}
/* Free memory prior to re-allocation if needed */
- if (dev->data->rx_queues[qidx] != NULL) {
+ if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
- qidx);
- nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]);
- dev->data->rx_queues[qidx] = NULL;
+ nicvf_netdev_qidx(nic, qidx));
+ nicvf_dev_rx_queue_release(
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
}
/* Allocate rxq memory */
rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
RTE_CACHE_LINE_SIZE, nic->node);
if (rxq == NULL) {
- PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx);
+ PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
+ nicvf_netdev_qidx(nic, qidx));
return -ENOMEM;
}
@@ -1142,10 +1305,15 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
rxq->precharge_cnt = 0;
- rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
+ if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
+ rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
+ else
+ rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+
/* Alloc completion queue */
- if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) {
+ if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
nicvf_dev_rx_queue_release(rxq);
return -ENOMEM;
@@ -1154,11 +1322,12 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
nicvf_rx_queue_reset(rxq);
PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
- qidx, rxq, mp->name, nb_desc,
+ nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
rte_mempool_avail_count(mp), rxq->phys);
- dev->data->rx_queues[qidx] = rxq;
- dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
+ dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
@@ -1171,8 +1340,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
- dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS;
- dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS;
+ dev_info->max_rx_queues =
+ (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
+ dev_info->max_tx_queues =
+ (uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_mac_addrs = 1;
dev_info->max_vfs = dev->pci_dev->max_vfs;
@@ -1207,15 +1378,20 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
}
static nicvf_phys_addr_t
-rbdr_rte_mempool_get(void *opaque)
+rbdr_rte_mempool_get(void *dev, void *opaque)
{
uint16_t qidx;
uintptr_t mbuf;
struct nicvf_rxq *rxq;
- struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque);
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
+ struct nicvf *nic = (struct nicvf *)opaque;
+ uint16_t rx_start, rx_end;
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
- rxq = nic->eth_dev->data->rx_queues[qidx];
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ rxq = eth_dev->data->rx_queues[qidx];
/* Maintain equal buffer count across all pools */
if (rxq->precharge_cnt >= rxq->qlen_mask)
continue;
@@ -1228,25 +1404,25 @@ rbdr_rte_mempool_get(void *opaque)
}
static int
-nicvf_dev_start(struct rte_eth_dev *dev)
+nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
int ret;
uint16_t qidx;
- uint32_t buffsz = 0, rbdrsz = 0;
uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
uint64_t mbuf_phys_off = 0;
struct nicvf_rxq *rxq;
- struct rte_pktmbuf_pool_private *mbp_priv;
struct rte_mbuf *mbuf;
- struct nicvf *nic = nicvf_pmd_priv(dev);
- struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
- uint16_t mtu;
+ uint16_t rx_start, rx_end;
+ uint16_t tx_start, tx_end;
PMD_INIT_FUNC_TRACE();
/* Userspace process exited without proper shutdown in last run */
if (nicvf_qset_rbdr_active(nic, 0))
- nicvf_dev_stop(dev);
+ nicvf_vf_stop(dev, nic, false);
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
/*
* Thunderx nicvf PMD can support more than one pool per port only when
@@ -1261,32 +1437,15 @@ nicvf_dev_start(struct rte_eth_dev *dev)
*
*/
- /* Validate RBDR buff size */
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
- rxq = dev->data->rx_queues[qidx];
- mbp_priv = rte_mempool_get_priv(rxq->pool);
- buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
- if (buffsz % 128) {
- PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128");
- return -EINVAL;
- }
- if (rbdrsz == 0)
- rbdrsz = buffsz;
- if (rbdrsz != buffsz) {
- PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)",
- qidx, rbdrsz, buffsz);
- return -EINVAL;
- }
- }
-
/* Validate mempool attributes */
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
rxq = dev->data->rx_queues[qidx];
rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
mbuf = rte_pktmbuf_alloc(rxq->pool);
if (mbuf == NULL) {
- PMD_INIT_LOG(ERR, "Failed allocate mbuf qid=%d pool=%s",
- qidx, rxq->pool->name);
+			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
+ "pool=%s",
+ nic->vf_id, qidx, rxq->pool->name);
return -ENOMEM;
}
rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
@@ -1296,20 +1455,21 @@ nicvf_dev_start(struct rte_eth_dev *dev)
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
if (mbuf_phys_off != rxq->mbuf_phys_off) {
- PMD_INIT_LOG(ERR, "pool params not same,%s %" PRIx64,
- rxq->pool->name, mbuf_phys_off);
+ PMD_INIT_LOG(ERR, "pool params not same,%s VF%d %"
+ PRIx64, rxq->pool->name, nic->vf_id,
+ mbuf_phys_off);
return -EINVAL;
}
}
/* Check the level of buffers in the pool */
total_rxq_desc = 0;
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
rxq = dev->data->rx_queues[qidx];
/* Count total numbers of rxq descs */
total_rxq_desc += rxq->qlen_mask + 1;
exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
- exp_buffs *= nic->eth_dev->data->nb_rx_queues;
+ exp_buffs *= dev->data->nb_rx_queues;
if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
rxq->pool->name,
@@ -1322,82 +1482,171 @@ nicvf_dev_start(struct rte_eth_dev *dev)
/* Check RBDR desc overflow */
ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
if (ret == 0) {
- PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc");
+ PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
+ "VF%d", nic->vf_id);
return -ENOMEM;
}
/* Enable qset */
ret = nicvf_qset_config(nic);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret);
+ PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
+ nic->vf_id);
return ret;
}
/* Allocate RBDR and RBDR ring desc */
nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
- ret = nicvf_qset_rbdr_alloc(nic, nb_rbdr_desc, rbdrsz);
+ ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc");
+ PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
+ "VF%d", nic->vf_id);
goto qset_reclaim;
}
/* Enable and configure RBDR registers */
ret = nicvf_qset_rbdr_config(nic, 0);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret);
+ PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
+ nic->vf_id);
goto qset_rbdr_free;
}
/* Fill rte_mempool buffers in RBDR pool and precharge it */
- ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get,
- dev, total_rxq_desc);
+ ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
+ total_rxq_desc);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret);
+ PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
+ nic->vf_id);
goto qset_rbdr_reclaim;
}
- PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR",
- nic->rbdr->tail, nb_rbdr_desc);
+ PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
+ nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
+
+ /* Configure VLAN Strip */
+ nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+
+	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
+	 * to a 64-bit memory address.
+	 * The alignment creates a hole in the mbuf (between the end of the
+	 * headroom and the start of the packet data). The new HW revision
+	 * provides an option to disable the L3 alignment feature and make the
+	 * mbuf layout look more like that of other NICs. For better application
+	 * compatibility, disable L3 alignment on the hardware revisions that
+	 * support it.
+	 */
+ nicvf_apad_config(nic, false);
+
+ /* Get queue ranges for this VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
+ /* Configure TX queues */
+ for (qidx = tx_start; qidx <= tx_end; qidx++) {
+ ret = nicvf_vf_start_tx_queue(dev, nic,
+ qidx % MAX_SND_QUEUES_PER_QS);
+ if (ret)
+ goto start_txq_error;
+ }
/* Configure RX queues */
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) {
- ret = nicvf_start_rx_queue(dev, qidx);
+ for (qidx = rx_start; qidx <= rx_end; qidx++) {
+ ret = nicvf_vf_start_rx_queue(dev, nic,
+ qidx % MAX_RCV_QUEUES_PER_QS);
if (ret)
goto start_rxq_error;
}
- /* Configure VLAN Strip */
- nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
-
- /* Configure TX queues */
- for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) {
- ret = nicvf_start_tx_queue(dev, qidx);
+ if (!nic->sqs_mode) {
+ /* Configure CPI algorithm */
+ ret = nicvf_configure_cpi(dev);
if (ret)
goto start_txq_error;
+
+ ret = nicvf_mbox_get_rss_size(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to get rss table size");
+ goto qset_rss_error;
+ }
+
+ /* Configure RSS */
+ ret = nicvf_configure_rss(dev);
+ if (ret)
+ goto qset_rss_error;
}
- /* Configure CPI algorithm */
- ret = nicvf_configure_cpi(dev);
- if (ret)
- goto start_txq_error;
+ /* Done; Let PF make the BGX's RX and TX switches to ON position */
+ nicvf_mbox_cfg_done(nic);
+ return 0;
- /* Configure RSS */
- ret = nicvf_configure_rss(dev);
- if (ret)
- goto qset_rss_error;
+qset_rss_error:
+ nicvf_rss_term(nic);
+start_rxq_error:
+ for (qidx = rx_start; qidx <= rx_end; qidx++)
+ nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
+start_txq_error:
+ for (qidx = tx_start; qidx <= tx_end; qidx++)
+ nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
+qset_rbdr_reclaim:
+ nicvf_qset_rbdr_reclaim(nic, 0);
+ nicvf_rbdr_release_mbufs(dev, nic);
+qset_rbdr_free:
+ if (nic->rbdr) {
+ rte_free(nic->rbdr);
+ nic->rbdr = NULL;
+ }
+qset_reclaim:
+ nicvf_qset_reclaim(nic);
+ return ret;
+}
+
+static int
+nicvf_dev_start(struct rte_eth_dev *dev)
+{
+ uint16_t qidx;
+ int ret;
+ size_t i;
+ struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ uint16_t mtu;
+ uint32_t buffsz = 0, rbdrsz = 0;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct nicvf_rxq *rxq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* This function must be called for a primary device */
+ assert_primary(nic);
+
+ /* Validate RBDR buff size */
+ for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
+ rxq = dev->data->rx_queues[qidx];
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+ if (buffsz % 128) {
+			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
+ return -EINVAL;
+ }
+ if (rbdrsz == 0)
+ rbdrsz = buffsz;
+ if (rbdrsz != buffsz) {
+ PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
+ qidx, rbdrsz, buffsz);
+ return -EINVAL;
+ }
+ }
/* Configure loopback */
ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
- goto qset_rss_error;
+ return ret;
}
/* Reset all statistics counters attached to this port */
ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
- goto qset_rss_error;
+ return ret;
}
/* Setup scatter mode if needed by jumbo */
@@ -1418,62 +1667,94 @@ nicvf_dev_start(struct rte_eth_dev *dev)
return -EBUSY;
}
+ ret = nicvf_vf_start(dev, nic, rbdrsz);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < nic->sqs_count; i++) {
+ assert(nic->snicvf[i]);
+
+ ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
+ if (ret != 0)
+ return ret;
+ }
+
/* Configure callbacks based on scatter mode */
nicvf_set_tx_function(dev);
nicvf_set_rx_function(dev);
- /* Done; Let PF make the BGX's RX and TX switches to ON position */
- nicvf_mbox_cfg_done(nic);
return 0;
-
-qset_rss_error:
- nicvf_rss_term(nic);
-start_txq_error:
- for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++)
- nicvf_stop_tx_queue(dev, qidx);
-start_rxq_error:
- for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++)
- nicvf_stop_rx_queue(dev, qidx);
-qset_rbdr_reclaim:
- nicvf_qset_rbdr_reclaim(nic, 0);
- nicvf_rbdr_release_mbufs(nic);
-qset_rbdr_free:
- if (nic->rbdr) {
- rte_free(nic->rbdr);
- nic->rbdr = NULL;
- }
-qset_reclaim:
- nicvf_qset_reclaim(nic);
- return ret;
}
static void
-nicvf_dev_stop(struct rte_eth_dev *dev)
+nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
{
+ size_t i;
int ret;
- uint16_t qidx;
struct nicvf *nic = nicvf_pmd_priv(dev);
PMD_INIT_FUNC_TRACE();
- /* Let PF make the BGX's RX and TX switches to OFF position */
- nicvf_mbox_shutdown(nic);
+ /* Teardown secondary vf first */
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (!nic->snicvf[i])
+ continue;
+
+ nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
+ }
+
+ /* Stop the primary VF now */
+ nicvf_vf_stop(dev, nic, cleanup);
/* Disable loopback */
ret = nicvf_loopback_config(nic, 0);
if (ret)
PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
+ /* Reclaim CPI configuration */
+ ret = nicvf_mbox_config_cpi(nic, 0);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
+}
+
+static void
+nicvf_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ nicvf_dev_stop_cleanup(dev, false);
+}
+
+static void
+nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
+{
+ int ret;
+ uint16_t qidx;
+ uint16_t tx_start, tx_end;
+ uint16_t rx_start, rx_end;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cleanup) {
+ /* Let PF make the BGX's RX and TX switches to OFF position */
+ nicvf_mbox_shutdown(nic);
+ }
+
/* Disable VLAN Strip */
nicvf_vlan_hw_strip(nic, 0);
- /* Reclaim sq */
- for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++)
- nicvf_stop_tx_queue(dev, qidx);
+ /* Get queue ranges for this VF */
+ nicvf_tx_range(dev, nic, &tx_start, &tx_end);
+
+ for (qidx = tx_start; qidx <= tx_end; qidx++)
+ nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
+
+ /* Get queue ranges for this VF */
+ nicvf_rx_range(dev, nic, &rx_start, &rx_end);
/* Reclaim rq */
- for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++)
- nicvf_stop_rx_queue(dev, qidx);
+ for (qidx = rx_start; qidx <= rx_end; qidx++)
+ nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
/* Reclaim RBDR */
ret = nicvf_qset_rbdr_reclaim(nic, 0);
@@ -1482,17 +1763,10 @@ nicvf_dev_stop(struct rte_eth_dev *dev)
/* Move all charged buffers in RBDR back to pool */
if (nic->rbdr != NULL)
- nicvf_rbdr_release_mbufs(nic);
-
- /* Reclaim CPI configuration */
- if (!nic->sqs_mode) {
- ret = nicvf_mbox_config_cpi(nic, 0);
- if (ret)
- PMD_INIT_LOG(ERR, "Failed to reclaim CPI config");
- }
+ nicvf_rbdr_release_mbufs(dev, nic);
/* Disable qset */
- ret = nicvf_qset_config(nic);
+ ret = nicvf_qset_reclaim(nic);
if (ret)
PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
@@ -1509,21 +1783,54 @@ nicvf_dev_stop(struct rte_eth_dev *dev)
static void
nicvf_dev_close(struct rte_eth_dev *dev)
{
+ size_t i;
struct nicvf *nic = nicvf_pmd_priv(dev);
PMD_INIT_FUNC_TRACE();
- nicvf_dev_stop(dev);
- nicvf_periodic_alarm_stop(nic);
+ nicvf_dev_stop_cleanup(dev, true);
+ nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
+
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (!nic->snicvf[i])
+ continue;
+
+ nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
+ }
+}
+
+static int
+nicvf_request_sqs(struct nicvf *nic)
+{
+ size_t i;
+
+ assert_primary(nic);
+ assert(nic->sqs_count > 0);
+ assert(nic->sqs_count <= MAX_SQS_PER_VF);
+
+ /* Set no of Rx/Tx queues in each of the SQsets */
+ for (i = 0; i < nic->sqs_count; i++) {
+ if (nicvf_svf_empty())
+ rte_panic("Cannot assign sufficient number of "
+ "secondary queues to primary VF%" PRIu8 "\n",
+ nic->vf_id);
+
+ nic->snicvf[i] = nicvf_svf_pop();
+ nic->snicvf[i]->sqs_id = i;
+ }
+
+ return nicvf_mbox_request_sqs(nic);
}
static int
nicvf_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_conf *conf = &dev->data->dev_conf;
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
struct rte_eth_txmode *txmode = &conf->txmode;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ uint8_t cqcount;
PMD_INIT_FUNC_TRACE();
@@ -1588,6 +1895,26 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
return -EINVAL;
}
+ assert_primary(nic);
+ NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
+ cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
+ if (cqcount > MAX_RCV_QUEUES_PER_QS) {
+ nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
+
+ assert(nic->sqs_count <= MAX_SQS_PER_VF);
+
+ if (nic->sqs_count > 0) {
+ if (nicvf_request_sqs(nic)) {
+ rte_panic("Cannot assign sufficient number of "
+ "secondary queues to PORT%d VF%" PRIu8 "\n",
+ dev->data->port_id, nic->vf_id);
+ }
+ }
+
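A worked example of the secondary queue set arithmetic above, assuming MAX_RCV_QUEUES_PER_QS is 8 (an assumption about the ThunderX queue set size, not something defined in this hunk):

	/*
	 * Illustrative only: the application configures 12 Rx and 4 Tx queues.
	 *   cqcount   = RTE_MAX(4, 12)          = 12
	 *   aligned   = RTE_ALIGN_CEIL(12, 8)   = 16
	 *   sqs_count = 16 / 8 - 1              = 1
	 * One secondary queue set is requested from the PF in addition to the
	 * primary VF's own eight queues.
	 */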
PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
dev->data->port_id, nicvf_hw_cap(nic));
@@ -1636,10 +1963,16 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- /* Setup callbacks for secondary process */
- nicvf_set_tx_function(eth_dev);
- nicvf_set_rx_function(eth_dev);
- return 0;
+ if (nic) {
+ /* Setup callbacks for secondary process */
+ nicvf_set_tx_function(eth_dev);
+ nicvf_set_rx_function(eth_dev);
+ return 0;
+ } else {
+			/* If nic == NULL then this is a secondary VF, so the
+			 * ethdev needs to be released by the caller */
+ return ENOTSUP;
+ }
}
pci_dev = eth_dev->pci_dev;
@@ -1649,7 +1982,6 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
nic->vendor_id = pci_dev->id.vendor_id;
nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
- nic->eth_dev = eth_dev;
PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
pci_dev->id.vendor_id, pci_dev->id.device_id,
@@ -1665,7 +1997,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
nicvf_disable_all_interrupts(nic);
- ret = nicvf_periodic_alarm_start(nic);
+ ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
if (ret) {
PMD_INIT_LOG(ERR, "Failed to start period alarm");
goto fail;
@@ -1685,11 +2017,28 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
);
}
+ ret = nicvf_base_init(nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
+ goto malloc_fail;
+ }
+
if (nic->sqs_mode) {
- PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching...");
- /* Detach port by returning Positive error number */
- ret = ENOTSUP;
- goto alarm_fail;
+ /* Push nic to stack of secondary vfs */
+ nicvf_svf_push(nic);
+
+ /* Steal nic pointer from the device for further reuse */
+ eth_dev->data->dev_private = NULL;
+
+ nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
+ ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to start period alarm");
+ goto fail;
+ }
+
+		/* Detach port by returning positive error number */
+ return ENOTSUP;
}
eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
@@ -1710,18 +2059,6 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
goto malloc_fail;
}
- ret = nicvf_base_init(nic);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
- goto malloc_fail;
- }
-
- ret = nicvf_mbox_get_rss_size(nic);
- if (ret) {
- PMD_INIT_LOG(ERR, "Failed to get rss table size");
- goto malloc_fail;
- }
-
PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, nic->vendor_id, nic->device_id,
nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
@@ -1732,7 +2069,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
malloc_fail:
rte_free(eth_dev->data->mac_addrs);
alarm_fail:
- nicvf_periodic_alarm_stop(nic);
+ nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
fail:
return ret;
}
@@ -1741,16 +2078,30 @@ static const struct rte_pci_id pci_id_nicvf_map[] = {
{
.class_id = RTE_CLASS_ANY_ID,
.vendor_id = PCI_VENDOR_ID_CAVIUM,
- .device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF,
+ .device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
+ .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
+ },
+ {
+ .class_id = RTE_CLASS_ANY_ID,
+ .vendor_id = PCI_VENDOR_ID_CAVIUM,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
- .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
},
{
.class_id = RTE_CLASS_ANY_ID,
.vendor_id = PCI_VENDOR_ID_CAVIUM,
- .device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF,
+ .device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
- .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF,
+ .subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
},
{
.vendor_id = 0,
@@ -1759,29 +2110,14 @@ static const struct rte_pci_id pci_id_nicvf_map[] = {
static struct eth_driver rte_nicvf_pmd = {
.pci_drv = {
- .name = "rte_nicvf_pmd",
.id_table = pci_id_nicvf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = nicvf_eth_dev_init,
.dev_private_size = sizeof(struct nicvf),
};
-static int
-rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s",
- THUNDERX_NICVF_PMD_VERSION);
-
- rte_eth_driver_register(&rte_nicvf_pmd);
- return 0;
-}
-
-static struct rte_driver rte_nicvf_driver = {
- .type = PMD_PDEV,
- .init = rte_nicvf_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_nicvf_driver, thunderx_nicvf);
-DRIVER_REGISTER_PCI_TABLE(thunderx_nicvf, pci_id_nicvf_map);
+RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h
index 34447e05..a74219fa 100644
--- a/drivers/net/thunderx/nicvf_ethdev.h
+++ b/drivers/net/thunderx/nicvf_ethdev.h
@@ -35,7 +35,7 @@
#include <rte_ethdev.h>
-#define THUNDERX_NICVF_PMD_VERSION "1.0"
+#define THUNDERX_NICVF_PMD_VERSION "2.0"
#define THUNDERX_REG_BYTES 8
#define NICVF_INTR_POLL_INTERVAL_MS 50
@@ -87,6 +87,17 @@ nicvf_mbuff_meta_length(struct rte_mbuf *mbuf)
return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf);
}
+static inline uint16_t
+nicvf_netdev_qidx(struct nicvf *nic, uint8_t local_qidx)
+{
+ uint16_t global_qidx = local_qidx;
+
+ if (nic->sqs_mode)
+ global_qidx += ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+
+ return global_qidx;
+}
+
/*
* Simple phy2virt functions assuming mbufs are in a single huge page
* V = P + offset
@@ -104,4 +115,32 @@ nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off)
return (phys_addr_t)(virt - mbuf_phys_off);
}
+static inline void
+nicvf_tx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *tx_start,
+ uint16_t *tx_end)
+{
+ uint16_t tmp;
+
+ *tx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
+ MAX_SND_QUEUES_PER_QS);
+ tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
+ MAX_SND_QUEUES_PER_QS) - 1;
+ *tx_end = dev->data->nb_tx_queues ?
+ RTE_MIN(tmp, dev->data->nb_tx_queues - 1) : 0;
+}
+
+static inline void
+nicvf_rx_range(struct rte_eth_dev *dev, struct nicvf *nic, uint16_t *rx_start,
+ uint16_t *rx_end)
+{
+ uint16_t tmp;
+
+ *rx_start = RTE_ALIGN_FLOOR(nicvf_netdev_qidx(nic, 0),
+ MAX_RCV_QUEUES_PER_QS);
+ tmp = RTE_ALIGN_CEIL(nicvf_netdev_qidx(nic, 0) + 1,
+ MAX_RCV_QUEUES_PER_QS) - 1;
+ *rx_end = dev->data->nb_rx_queues ?
+ RTE_MIN(tmp, dev->data->nb_rx_queues - 1) : 0;
+}
+
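A worked example of the queue index mapping defined above; the value of 8 queues per queue set is an assumption about the ThunderX hardware constants, not something declared in this header hunk.

/*
 * Illustrative only, assuming MAX_RCV_QUEUES_PER_QS == 8 and an ethdev
 * configured with 12 Rx queues:
 *
 *   primary VF   (sqs_mode == 0)   local qidx 0..7 -> global qidx 0..7
 *   secondary VF (sqs_id   == 0)   local qidx 0..7 -> global qidx 8..15
 *
 * nicvf_rx_range() for that secondary VF then yields
 *   rx_start = RTE_ALIGN_FLOOR(8, 8)                      = 8
 *   rx_end   = RTE_MIN(RTE_ALIGN_CEIL(9, 8) - 1, 12 - 1)  = 11
 * so only global queues 8..11 are actually in use on that VF.
 */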
#endif /* __THUNDERX_NICVF_ETHDEV_H__ */
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index e15c7303..fc43b747 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -368,7 +368,8 @@ nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;
if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
- rxq->nic->eth_dev->data->rx_mbuf_alloc_failed += to_fill;
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ to_fill;
return 0;
}
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index c52545d9..c900e121 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -113,12 +113,16 @@ struct nicvf {
uint16_t subsystem_vendor_id;
struct nicvf_rbdr *rbdr;
struct nicvf_rss_reta_info rss_info;
- struct rte_eth_dev *eth_dev;
struct rte_intr_handle intr_handle;
uint8_t cpi_alg;
uint16_t mtu;
bool vlan_filter_en;
uint8_t mac_addr[ETHER_ADDR_LEN];
+ /* secondary queue set support */
+ uint8_t sqs_id;
+ uint8_t sqs_count;
+#define MAX_SQS_PER_VF 11
+ struct nicvf *snicvf[MAX_SQS_PER_VF];
} __rte_cache_aligned;
#endif /* _THUNDERX_NICVF_STRUCT_H */
diff --git a/drivers/net/thunderx/nicvf_svf.c b/drivers/net/thunderx/nicvf_svf.c
new file mode 100644
index 00000000..f746e946
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_svf.c
@@ -0,0 +1,78 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <rte_debug.h>
+#include <rte_malloc.h>
+
+#include "base/nicvf_bsvf.h"
+
+#include "nicvf_svf.h"
+
+void
+nicvf_svf_push(struct nicvf *vf)
+{
+ struct svf_entry *entry = NULL;
+
+ assert(vf != NULL);
+
+ entry = rte_zmalloc("nicvf", sizeof(*entry), RTE_CACHE_LINE_SIZE);
+ if (entry == NULL)
+		rte_panic("Cannot allocate memory for svf_entry\n");
+
+ entry->vf = vf;
+
+ nicvf_bsvf_push(entry);
+}
+
+struct nicvf *
+nicvf_svf_pop(void)
+{
+ struct nicvf *vf;
+ struct svf_entry *entry;
+
+ entry = nicvf_bsvf_pop();
+
+ vf = entry->vf;
+
+ rte_free(entry);
+
+ return vf;
+}
+
+int
+nicvf_svf_empty(void)
+{
+ return nicvf_bsvf_empty();
+}
diff --git a/drivers/net/thunderx/nicvf_svf.h b/drivers/net/thunderx/nicvf_svf.h
new file mode 100644
index 00000000..6471aa57
--- /dev/null
+++ b/drivers/net/thunderx/nicvf_svf.h
@@ -0,0 +1,66 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __THUNDERX_NICVF_SVF_H__
+#define __THUNDERX_NICVF_SVF_H__
+
+struct nicvf;
+
+/**
+ * Enqueue new VF to secondary qsets.
+ *
+ * @param vf
+ *   VF to be enqueued.
+ */
+void
+nicvf_svf_push(struct nicvf *vf);
+
+/**
+ * Dequeue a VF from secondary qsets.
+ *
+ * @return
+ * Dequeued entry.
+ */
+struct nicvf *
+nicvf_svf_pop(void);
+
+/**
+ * Check if the queue of secondary qsets is empty.
+ *
+ * @return
+ *   0 if the queue is not empty,
+ *   non-zero otherwise (the queue is empty).
+ */
+int
+nicvf_svf_empty(void);
+
+#endif /* __THUNDERX_NICVF_SVF_H__ */
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 7539cd49..766d4ef1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -41,7 +41,7 @@
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_virtio_net.h>
#include <rte_spinlock.h>
@@ -51,6 +51,7 @@
#define ETH_VHOST_IFACE_ARG "iface"
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
+#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
static const char *drivername = "VHOST PMD";
@@ -58,6 +59,7 @@ static const char *valid_arguments[] = {
ETH_VHOST_IFACE_ARG,
ETH_VHOST_QUEUES_ARG,
ETH_VHOST_CLIENT_ARG,
+ ETH_VHOST_DEQUEUE_ZERO_COPY,
NULL
};
@@ -72,6 +74,32 @@ static struct ether_addr base_eth_addr = {
}
};
+enum vhost_xstats_pkts {
+ VHOST_UNDERSIZE_PKT = 0,
+ VHOST_64_PKT,
+ VHOST_65_TO_127_PKT,
+ VHOST_128_TO_255_PKT,
+ VHOST_256_TO_511_PKT,
+ VHOST_512_TO_1023_PKT,
+ VHOST_1024_TO_1522_PKT,
+ VHOST_1523_TO_MAX_PKT,
+ VHOST_BROADCAST_PKT,
+ VHOST_MULTICAST_PKT,
+ VHOST_UNICAST_PKT,
+ VHOST_ERRORS_PKT,
+ VHOST_ERRORS_FRAGMENTED,
+ VHOST_ERRORS_JABBER,
+ VHOST_UNKNOWN_PROTOCOL,
+ VHOST_XSTATS_MAX,
+};
+
+struct vhost_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t missed_pkts;
+ uint64_t xstats[VHOST_XSTATS_MAX];
+};
+
struct vhost_queue {
int vid;
rte_atomic32_t allow_queuing;
@@ -80,11 +108,7 @@ struct vhost_queue {
struct rte_mempool *mb_pool;
uint8_t port;
uint16_t virtqueue_id;
- uint64_t rx_pkts;
- uint64_t tx_pkts;
- uint64_t missed_pkts;
- uint64_t rx_bytes;
- uint64_t tx_bytes;
+ struct vhost_stats stats;
};
struct pmd_internal {
@@ -127,6 +151,242 @@ struct rte_vhost_vring_state {
static struct rte_vhost_vring_state *vring_states[RTE_MAX_ETHPORTS];
+#define VHOST_XSTATS_NAME_SIZE 64
+
+struct vhost_xstats_name_off {
+ char name[VHOST_XSTATS_NAME_SIZE];
+ uint64_t offset;
+};
+
+/* [rx]_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+ {"fragmented_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_FRAGMENTED])},
+ {"jabber_errors",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_JABBER])},
+ {"unknown_protos_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNKNOWN_PROTOCOL])},
+};
+
+/* [tx]_ is prepended to the name string here */
+static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
+ {"good_packets",
+ offsetof(struct vhost_queue, stats.pkts)},
+ {"total_bytes",
+ offsetof(struct vhost_queue, stats.bytes)},
+ {"missed_pkts",
+ offsetof(struct vhost_queue, stats.missed_pkts)},
+ {"broadcast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
+ {"multicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_MULTICAST_PKT])},
+ {"unicast_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNICAST_PKT])},
+ {"undersize_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_UNDERSIZE_PKT])},
+ {"size_64_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_64_PKT])},
+ {"size_65_to_127_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_65_TO_127_PKT])},
+ {"size_128_to_255_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_128_TO_255_PKT])},
+ {"size_256_to_511_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_256_TO_511_PKT])},
+ {"size_512_to_1023_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_512_TO_1023_PKT])},
+ {"size_1024_to_1522_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1024_TO_1522_PKT])},
+ {"size_1523_to_max_packets",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_1523_TO_MAX_PKT])},
+ {"errors_with_bad_CRC",
+ offsetof(struct vhost_queue, stats.xstats[VHOST_ERRORS_PKT])},
+};
+
+#define VHOST_NB_XSTATS_RXPORT (sizeof(vhost_rxport_stat_strings) / \
+ sizeof(vhost_rxport_stat_strings[0]))
+
+#define VHOST_NB_XSTATS_TXPORT (sizeof(vhost_txport_stat_strings) / \
+ sizeof(vhost_txport_stat_strings[0]))
+
+static void
+vhost_dev_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct vhost_queue *vq = NULL;
+ unsigned int i = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ memset(&vq->stats, 0, sizeof(vq->stats));
+ }
+}
+
+static int
+vhost_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit __rte_unused)
+{
+ unsigned int t = 0;
+ int count = 0;
+ int nstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (!xstats_names)
+ return nstats;
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_%s", vhost_rxport_stat_strings[t].name);
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_%s", vhost_txport_stat_strings[t].name);
+ count++;
+ }
+ return count;
+}
+
+static int
+vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int t;
+ unsigned int count = 0;
+ struct vhost_queue *vq = NULL;
+ unsigned int nxstats = VHOST_NB_XSTATS_RXPORT + VHOST_NB_XSTATS_TXPORT;
+
+ if (n < nxstats)
+ return nxstats;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
+ + vq->stats.missed_pkts
+ - (vq->stats.xstats[VHOST_BROADCAST_PKT]
+ + vq->stats.xstats[VHOST_MULTICAST_PKT]);
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_rxport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
+ xstats[count].value = 0;
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (!vq)
+ continue;
+ xstats[count].value +=
+ *(uint64_t *)(((char *)vq)
+ + vhost_txport_stat_strings[t].offset);
+ }
+ count++;
+ }
+ return count;
+}
+
+static inline void
+vhost_count_multicast_broadcast(struct vhost_queue *vq,
+ struct rte_mbuf *mbuf)
+{
+ struct ether_addr *ea = NULL;
+ struct vhost_stats *pstats = &vq->stats;
+
+ ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
+ if (is_multicast_ether_addr(ea)) {
+ if (is_broadcast_ether_addr(ea))
+ pstats->xstats[VHOST_BROADCAST_PKT]++;
+ else
+ pstats->xstats[VHOST_MULTICAST_PKT]++;
+ }
+}
+
+static void
+vhost_update_packet_xstats(struct vhost_queue *vq,
+ struct rte_mbuf **bufs,
+ uint16_t count)
+{
+ uint32_t pkt_len = 0;
+ uint64_t i = 0;
+ uint64_t index;
+ struct vhost_stats *pstats = &vq->stats;
+
+ for (i = 0; i < count ; i++) {
+ pkt_len = bufs[i]->pkt_len;
+ if (pkt_len == 64) {
+ pstats->xstats[VHOST_64_PKT]++;
+ } else if (pkt_len > 64 && pkt_len < 1024) {
+ index = (sizeof(pkt_len) * 8)
+ - __builtin_clz(pkt_len) - 5;
+ pstats->xstats[index]++;
+ } else {
+ if (pkt_len < 64)
+ pstats->xstats[VHOST_UNDERSIZE_PKT]++;
+ else if (pkt_len <= 1522)
+ pstats->xstats[VHOST_1024_TO_1522_PKT]++;
+ else if (pkt_len > 1522)
+ pstats->xstats[VHOST_1523_TO_MAX_PKT]++;
+ }
+ vhost_count_multicast_broadcast(vq, bufs[i]);
+ }
+}
+
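The bit-scan expression in vhost_update_packet_xstats() maps any length in the open interval (64, 1024) to its size bucket from the position of the most significant bit; a short worked example (sample lengths are arbitrary, the bucket values come from the enum defined above):

/*
 * index = 8 * sizeof(pkt_len) - __builtin_clz(pkt_len) - 5
 *       = (floor(log2(pkt_len)) + 1) - 5
 *
 * e.g. pkt_len = 100 -> 32 - 25 - 5 = 2 = VHOST_65_TO_127_PKT
 *      pkt_len = 300 -> 32 - 23 - 5 = 4 = VHOST_256_TO_511_PKT
 *      pkt_len = 600 -> 32 - 22 - 5 = 5 = VHOST_512_TO_1023_PKT
 */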
static uint16_t
eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
@@ -145,13 +405,15 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_rx = rte_vhost_dequeue_burst(r->vid,
r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
- r->rx_pkts += nb_rx;
+ r->stats.pkts += nb_rx;
for (i = 0; likely(i < nb_rx); i++) {
bufs[i]->port = r->port;
- r->rx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
}
+ vhost_update_packet_xstats(r, bufs, nb_rx);
+
out:
rte_atomic32_set(&r->while_queuing, 0);
@@ -176,11 +438,20 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
nb_tx = rte_vhost_enqueue_burst(r->vid,
r->virtqueue_id, bufs, nb_bufs);
- r->tx_pkts += nb_tx;
- r->missed_pkts += nb_bufs - nb_tx;
+ r->stats.pkts += nb_tx;
+ r->stats.missed_pkts += nb_bufs - nb_tx;
for (i = 0; likely(i < nb_tx); i++)
- r->tx_bytes += bufs[i]->pkt_len;
+ r->stats.bytes += bufs[i]->pkt_len;
+
+ vhost_update_packet_xstats(r, bufs, nb_tx);
+
+	/* According to RFC 2863 (ifHCOutMulticastPkts and ifHCOutBroadcastPkts),
+	 * the "multicast" and "broadcast" counters also account for packets
+	 * that were not transmitted successfully.
+ */
+ for (i = nb_tx; i < nb_bufs; i++)
+ vhost_count_multicast_broadcast(r, bufs[i]);
for (i = 0; likely(i < nb_tx); i++)
rte_pktmbuf_free(bufs[i]);
@@ -290,7 +561,7 @@ new_device(int vid)
RTE_LOG(INFO, PMD, "New connection established\n");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
return 0;
}
@@ -357,7 +628,7 @@ destroy_device(int vid)
RTE_LOG(INFO, PMD, "Connection closed\n");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int
@@ -386,7 +657,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
RTE_LOG(INFO, PMD, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
return 0;
}
@@ -428,6 +699,35 @@ rte_eth_vhost_get_queue_event(uint8_t port_id,
return -1;
}
+int
+rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
+{
+ struct internal_list *list;
+ struct rte_eth_dev *eth_dev;
+ struct vhost_queue *vq;
+ int vid = -1;
+
+ if (!rte_eth_dev_is_valid_port(port_id))
+ return -1;
+
+ pthread_mutex_lock(&internal_list_lock);
+
+ TAILQ_FOREACH(list, &internal_list, next) {
+ eth_dev = list->eth_dev;
+ if (eth_dev->data->port_id == port_id) {
+ vq = eth_dev->data->rx_queues[0];
+ if (vq) {
+ vid = vq->vid;
+ }
+ break;
+ }
+ }
+
+ pthread_mutex_unlock(&internal_list_lock);
+
+ return vid;
+}
+
static void *
vhost_driver_session(void *param __rte_unused)
{
@@ -582,10 +882,10 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- stats->q_ipackets[i] = vq->rx_pkts;
+ stats->q_ipackets[i] = vq->stats.pkts;
rx_total += stats->q_ipackets[i];
- stats->q_ibytes[i] = vq->rx_bytes;
+ stats->q_ibytes[i] = vq->stats.bytes;
rx_total_bytes += stats->q_ibytes[i];
}
@@ -594,11 +894,11 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- stats->q_opackets[i] = vq->tx_pkts;
- tx_missed_total += vq->missed_pkts;
+ stats->q_opackets[i] = vq->stats.pkts;
+ tx_missed_total += vq->stats.missed_pkts;
tx_total += stats->q_opackets[i];
- stats->q_obytes[i] = vq->tx_bytes;
+ stats->q_obytes[i] = vq->stats.bytes;
tx_total_bytes += stats->q_obytes[i];
}
@@ -619,16 +919,16 @@ eth_stats_reset(struct rte_eth_dev *dev)
if (dev->data->rx_queues[i] == NULL)
continue;
vq = dev->data->rx_queues[i];
- vq->rx_pkts = 0;
- vq->rx_bytes = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (dev->data->tx_queues[i] == NULL)
continue;
vq = dev->data->tx_queues[i];
- vq->tx_pkts = 0;
- vq->tx_bytes = 0;
- vq->missed_pkts = 0;
+ vq->stats.pkts = 0;
+ vq->stats.bytes = 0;
+ vq->stats.missed_pkts = 0;
}
}
@@ -682,6 +982,9 @@ static const struct eth_dev_ops ops = {
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
+ .xstats_reset = vhost_dev_xstats_reset,
+ .xstats_get = vhost_dev_xstats_get,
+ .xstats_get_names = vhost_dev_xstats_get_names,
};
static int
@@ -714,7 +1017,7 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
goto error;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL)
goto error;
@@ -823,7 +1126,7 @@ open_int(const char *key __rte_unused, const char *value, void *extra_args)
}
static int
-rte_pmd_vhost_devinit(const char *name, const char *params)
+rte_pmd_vhost_probe(const char *name, const char *params)
{
struct rte_kvargs *kvlist = NULL;
int ret = 0;
@@ -831,6 +1134,7 @@ rte_pmd_vhost_devinit(const char *name, const char *params)
uint16_t queues;
uint64_t flags = 0;
int client_mode = 0;
+ int dequeue_zero_copy = 0;
RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);
@@ -867,6 +1171,16 @@ rte_pmd_vhost_devinit(const char *name, const char *params)
flags |= RTE_VHOST_USER_CLIENT;
}
+ if (rte_kvargs_count(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY) == 1) {
+ ret = rte_kvargs_process(kvlist, ETH_VHOST_DEQUEUE_ZERO_COPY,
+ &open_int, &dequeue_zero_copy);
+ if (ret < 0)
+ goto out_free;
+
+ if (dequeue_zero_copy)
+ flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+ }
+
eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags);
out_free:
@@ -875,7 +1189,7 @@ out_free:
}
static int
-rte_pmd_vhost_devuninit(const char *name)
+rte_pmd_vhost_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internal *internal;
@@ -924,13 +1238,13 @@ rte_pmd_vhost_devuninit(const char *name)
return 0;
}
-static struct rte_driver pmd_vhost_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_vhost_devinit,
- .uninit = rte_pmd_vhost_devuninit,
+static struct rte_vdev_driver pmd_vhost_drv = {
+ .probe = rte_pmd_vhost_probe,
+ .remove = rte_pmd_vhost_remove,
};
-PMD_REGISTER_DRIVER(pmd_vhost_drv, eth_vhost);
-DRIVER_REGISTER_PARAM_STRING(eth_vhost,
+RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
+RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
+RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
"queues=<int>");
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index ff5d877b..7c98b1ae 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -102,6 +102,15 @@ struct rte_eth_vhost_queue_event {
int rte_eth_vhost_get_queue_event(uint8_t port_id,
struct rte_eth_vhost_queue_event *event);
+/**
+ * Get the 'vid' value associated with the specified port.
+ *
+ * @return
+ * - On success, the 'vid' associated with 'port_id'.
+ * - On failure, a negative value.
+ */
+int rte_eth_vhost_get_vid_from_port_id(uint8_t port_id);
+
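A minimal usage sketch for the new API; the helper name is hypothetical and the rte_vhost_get_ifname() call is only one example of what the returned vid can be used for (it is assumed to be available from librte_vhost's rte_virtio_net.h).

static void
example_print_ifname(uint8_t port_id)
{
	char ifname[64];
	int vid = rte_eth_vhost_get_vid_from_port_id(port_id);

	if (vid < 0)
		return;		/* not a vhost port, or no device yet */

	/* the vid identifies the device inside librte_vhost, e.g.: */
	if (rte_vhost_get_ifname(vid, ifname, sizeof(ifname)) == 0)
		printf("port %u is backed by %s\n", port_id, ifname);
}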
#ifdef __cplusplus
}
#endif
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 65bf3a8c..3d44083f 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -8,3 +8,9 @@ DPDK_16.04 {
local: *;
};
+
+DPDK_16.11 {
+ global:
+
+ rte_eth_vhost_get_vid_from_port_id;
+};
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index 3020b688..97972a6c 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -50,9 +50,12 @@ SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtqueue.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_pci.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_ethdev.c
-
-ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSSE3,$(CFLAGS)),RTE_MACHINE_CPUFLAG_SSSE3)
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple.c
+
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple_sse.c
+else ifneq ($(filter y,$(CONFIG_RTE_ARCH_ARM) $(CONFIG_RTE_ARCH_ARM64)),)
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple_neon.c
endif
ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 86cf8a38..079fd6c8 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -103,7 +103,8 @@ static int virtio_dev_queue_stats_mapping_set(
* The set of PCI devices this driver supports
*/
static const struct rte_pci_id pci_id_virtio_map[] = {
- { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_DEVICEID_MIN) },
+ { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_LEGACY_DEVICEID_NET) },
+ { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_MODERN_DEVICEID_NET) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -279,28 +280,65 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues)
return 0;
}
-void
-virtio_dev_queue_release(struct virtqueue *vq)
+static void
+virtio_dev_queue_release(void *queue __rte_unused)
{
- struct virtio_hw *hw;
+ /* do nothing */
+}
- if (vq) {
- hw = vq->hw;
- if (vq->configured)
- hw->vtpci_ops->del_queue(hw, vq);
+static int
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+{
+ if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+ return VTNET_CQ;
+ else if (vtpci_queue_idx % 2 == 0)
+ return VTNET_RQ;
+ else
+ return VTNET_TQ;
+}
- rte_free(vq->sw_ring);
- rte_free(vq);
- }
+static uint16_t
+virtio_get_nr_vq(struct virtio_hw *hw)
+{
+ uint16_t nr_vq = hw->max_queue_pairs * 2;
+
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
+ nr_vq += 1;
+
+ return nr_vq;
}
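The even/odd convention above follows the virtio-net queue layout: receive queues sit at even indexes, transmit queues at odd indexes, and the control queue, if negotiated, at index 2 * max_queue_pairs. A hypothetical two-queue-pair device therefore maps as follows (illustrative sketch, not part of the patch):

/*
 * max_queue_pairs == 2, VIRTIO_NET_F_CTRL_VQ negotiated:
 *
 *   vtpci_queue_idx : 0    1    2    3    4
 *   queue type      : RQ0  TQ0  RQ1  TQ1  CQ
 *
 * virtio_get_nr_vq() returns 2 * 2 + 1 = 5.
 */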
-int virtio_dev_queue_setup(struct rte_eth_dev *dev,
- int queue_type,
- uint16_t queue_idx,
- uint16_t vtpci_queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- void **pvq)
+static void
+virtio_init_vring(struct virtqueue *vq)
+{
+ int size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /*
+ * Reinitialise since virtio port might have been stopped and restarted
+ */
+ memset(ring_mem, 0, vq->vq_ring_size);
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_used_cons_idx = 0;
+ vq->vq_desc_head_idx = 0;
+ vq->vq_avail_idx = 0;
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+ memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
+
+ vring_desc_init(vr->desc, size);
+
+ /*
+	 * Disable device (host) from interrupting the guest
+ */
+ virtqueue_disable_intr(vq);
+}
+
+static int
+virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
@@ -311,9 +349,9 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
struct virtnet_tx *txvq = NULL;
struct virtnet_ctl *cvq = NULL;
struct virtqueue *vq;
- const char *queue_names[] = {"rvq", "txq", "cvq"};
- size_t sz_vq, sz_q = 0, sz_hdr_mz = 0;
+ size_t sz_hdr_mz = 0;
void *sw_ring = NULL;
+ int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
int ret;
PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
@@ -323,7 +361,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
* Always power of 2 and if 0 virtqueue does not exist
*/
vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
- PMD_INIT_LOG(DEBUG, "vq_size: %u nb_desc:%u", vq_size, nb_desc);
+ PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
return -EINVAL;
@@ -334,40 +372,35 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- snprintf(vq_name, sizeof(vq_name), "port%d_%s%d",
- dev->data->port_id, queue_names[queue_type], queue_idx);
+ snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
+ dev->data->port_id, vtpci_queue_idx);
- sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) +
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
vq_size * sizeof(struct vq_desc_extra),
RTE_CACHE_LINE_SIZE);
- if (queue_type == VTNET_RQ) {
- sz_q = sz_vq + sizeof(*rxvq);
- } else if (queue_type == VTNET_TQ) {
- sz_q = sz_vq + sizeof(*txvq);
+ if (queue_type == VTNET_TQ) {
/*
* For each xmit packet, allocate a virtio_net_hdr
* and indirect ring elements
*/
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
} else if (queue_type == VTNET_CQ) {
- sz_q = sz_vq + sizeof(*cvq);
/* Allocate a page for control vq command, data and status */
sz_hdr_mz = PAGE_SIZE;
}
- vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ SOCKET_ID_ANY);
if (vq == NULL) {
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
}
+ hw->vqs[vtpci_queue_idx] = vq;
+
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
- if (nb_desc == 0 || nb_desc > vq_size)
- nb_desc = vq_size;
- vq->vq_free_cnt = nb_desc;
-
/*
* Reserve a memzone for vring elements
*/
@@ -376,7 +409,8 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
- mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id,
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ SOCKET_ID_ANY,
0, VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
@@ -396,12 +430,13 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);
+ virtio_init_vring(vq);
+
if (sz_hdr_mz) {
- snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr",
- dev->data->port_id, queue_names[queue_type],
- queue_idx);
+ snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
+ dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- socket_id, 0,
+ SOCKET_ID_ANY, 0,
RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
@@ -418,7 +453,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
sizeof(vq->sw_ring[0]);
sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
- RTE_CACHE_LINE_SIZE, socket_id);
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
@@ -426,30 +461,26 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
}
vq->sw_ring = sw_ring;
- rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq);
+ rxvq = &vq->rxq;
rxvq->vq = vq;
rxvq->port_id = dev->data->port_id;
- rxvq->queue_id = queue_idx;
rxvq->mz = mz;
- *pvq = rxvq;
} else if (queue_type == VTNET_TQ) {
- txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq);
+ txvq = &vq->txq;
txvq->vq = vq;
txvq->port_id = dev->data->port_id;
- txvq->queue_id = queue_idx;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
-
- *pvq = txvq;
} else if (queue_type == VTNET_CQ) {
- cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq);
+ cvq = &vq->cq;
cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
- *pvq = cvq;
+
+ hw->cvq = cvq;
}
/* For virtio_user case (that is when dev->pci_dev is NULL), we use
@@ -490,11 +521,9 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev,
if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
- virtio_dev_queue_release(vq);
return -EINVAL;
}
- vq->configured = 1;
return 0;
fail_q_alloc:
@@ -506,40 +535,60 @@ fail_q_alloc:
return ret;
}
-static int
-virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx,
- uint32_t socket_id)
+static void
+virtio_free_queues(struct virtio_hw *hw)
{
- struct virtnet_ctl *cvq;
- int ret;
- struct virtio_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = virtio_get_nr_vq(hw);
+ struct virtqueue *vq;
+ int queue_type;
+ uint16_t i;
- PMD_INIT_FUNC_TRACE();
- ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX,
- vtpci_queue_idx, 0, socket_id, (void **)&cvq);
- if (ret < 0) {
- PMD_INIT_LOG(ERR, "control vq initialization failed");
- return ret;
+ for (i = 0; i < nr_vq; i++) {
+ vq = hw->vqs[i];
+ if (!vq)
+ continue;
+
+ queue_type = virtio_get_queue_type(hw, i);
+ if (queue_type == VTNET_RQ) {
+ rte_free(vq->sw_ring);
+ rte_memzone_free(vq->rxq.mz);
+ } else if (queue_type == VTNET_TQ) {
+ rte_memzone_free(vq->txq.mz);
+ rte_memzone_free(vq->txq.virtio_net_hdr_mz);
+ } else {
+ rte_memzone_free(vq->cq.mz);
+ rte_memzone_free(vq->cq.virtio_net_hdr_mz);
+ }
+
+ rte_free(vq);
}
- hw->cvq = cvq;
- return 0;
+ rte_free(hw->vqs);
}
-static void
-virtio_free_queues(struct rte_eth_dev *dev)
+static int
+virtio_alloc_queues(struct rte_eth_dev *dev)
{
- unsigned int i;
-
- for (i = 0; i < dev->data->nb_rx_queues; i++)
- virtio_dev_rx_queue_release(dev->data->rx_queues[i]);
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint16_t nr_vq = virtio_get_nr_vq(hw);
+ uint16_t i;
+ int ret;
- dev->data->nb_rx_queues = 0;
+ hw->vqs = rte_zmalloc(NULL, sizeof(struct virtqueue *) * nr_vq, 0);
+ if (!hw->vqs) {
+ PMD_INIT_LOG(ERR, "failed to allocate vqs");
+ return -ENOMEM;
+ }
- for (i = 0; i < dev->data->nb_tx_queues; i++)
- virtio_dev_tx_queue_release(dev->data->tx_queues[i]);
+ for (i = 0; i < nr_vq; i++) {
+ ret = virtio_init_queue(dev, i);
+ if (ret < 0) {
+ virtio_free_queues(hw);
+ return ret;
+ }
+ }
- dev->data->nb_tx_queues = 0;
+ return 0;
}
static void
@@ -553,9 +602,8 @@ virtio_dev_close(struct rte_eth_dev *dev)
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
vtpci_reset(hw);
- hw->started = 0;
virtio_dev_free_mbufs(dev);
- virtio_free_queues(dev);
+ virtio_free_queues(hw);
}
static void
@@ -650,6 +698,23 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
}
+#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
+static int
+virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ hw->vtnet_hdr_size;
+ uint32_t frame_size = mtu + ether_hdr_len;
+
+ if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
+ PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
+ ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
+ return -EINVAL;
+ }
+ return 0;
+}
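+/*
+ * Sizing sketch (illustrative, assuming the 12-byte mergeable virtio-net
+ * header): with ETHER_HDR_LEN (14) and VLAN_TAG_LEN (4), a 1500-byte MTU
+ * gives a 1530-byte frame, well below VIRTIO_MAX_RX_PKTLEN (9728), so it
+ * passes the check above.
+ */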
+
/*
* dev_ops for virtio, bare necessities for basic operation
*/
@@ -662,7 +727,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.promiscuous_disable = virtio_dev_promiscuous_disable,
.allmulticast_enable = virtio_dev_allmulticast_enable,
.allmulticast_disable = virtio_dev_allmulticast_disable,
-
+ .mtu_set = virtio_mtu_set,
.dev_infos_get = virtio_dev_info_get,
.stats_get = virtio_dev_stats_get,
.xstats_get = virtio_dev_xstats_get,
@@ -671,9 +736,9 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
.rx_queue_setup = virtio_dev_rx_queue_setup,
- .rx_queue_release = virtio_dev_rx_queue_release,
+ .rx_queue_release = virtio_dev_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- .tx_queue_release = virtio_dev_tx_queue_release,
+ .tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
.vlan_filter_set = virtio_vlan_filter_set,
@@ -1040,14 +1105,13 @@ virtio_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
}
static int
-virtio_negotiate_features(struct virtio_hw *hw)
+virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
uint64_t host_features;
/* Prepare guest_features: feature that driver wants to support */
- hw->guest_features = VIRTIO_PMD_GUEST_FEATURES;
PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
- hw->guest_features);
+ req_features);
/* Read device(host) feature bits */
host_features = hw->vtpci_ops->get_features(hw);
@@ -1058,6 +1122,7 @@ virtio_negotiate_features(struct virtio_hw *hw)
* Negotiate features: Subset of device feature bits are written back
* guest feature bits.
*/
+ hw->guest_features = req_features;
hw->guest_features = vtpci_negotiate_features(hw, host_features);
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
hw->guest_features);
@@ -1076,6 +1141,8 @@ virtio_negotiate_features(struct virtio_hw *hw)
}
}
+ hw->req_guest_features = req_features;
+
return 0;
}
@@ -1101,7 +1168,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC);
+ RTE_ETH_EVENT_INTR_LSC, NULL);
}
}
@@ -1116,47 +1183,16 @@ rx_func_get(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
-/*
- * This function is based on probe() function in virtio_pci.c
- * It returns 0 on success.
- */
-int
-eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
+/* reset device and renegotiate features if needed */
+static int
+virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
struct virtio_net_config *config;
struct virtio_net_config local_config;
- struct rte_pci_device *pci_dev;
- uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
+ struct rte_pci_device *pci_dev = eth_dev->pci_dev;
int ret;
- RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
-
- eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
-
- if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- rx_func_get(eth_dev);
- return 0;
- }
-
- /* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
- if (eth_dev->data->mac_addrs == NULL) {
- PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
- return -ENOMEM;
- }
-
- pci_dev = eth_dev->pci_dev;
-
- if (pci_dev) {
- ret = vtpci_init(pci_dev, hw, &dev_flags);
- if (ret)
- return ret;
- }
-
/* Reset the device although not necessary at startup */
vtpci_reset(hw);
@@ -1165,15 +1201,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
/* Tell the host we've known how to drive the device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
- if (virtio_negotiate_features(hw) < 0)
+ if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
/* If host does not support status then disable LSC */
if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->data->dev_flags = dev_flags;
rx_func_get(eth_dev);
@@ -1221,16 +1258,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
config->max_virtqueue_pairs = 1;
}
- hw->max_rx_queues =
- (VIRTIO_MAX_RX_QUEUES < config->max_virtqueue_pairs) ?
- VIRTIO_MAX_RX_QUEUES : config->max_virtqueue_pairs;
- hw->max_tx_queues =
- (VIRTIO_MAX_TX_QUEUES < config->max_virtqueue_pairs) ?
- VIRTIO_MAX_TX_QUEUES : config->max_virtqueue_pairs;
-
- virtio_dev_cq_queue_setup(eth_dev,
- config->max_virtqueue_pairs * 2,
- SOCKET_ID_ANY);
+ hw->max_queue_pairs = config->max_virtqueue_pairs;
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
config->max_virtqueue_pairs);
@@ -1241,23 +1269,73 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
config->mac[2], config->mac[3],
config->mac[4], config->mac[5]);
} else {
- hw->max_rx_queues = 1;
- hw->max_tx_queues = 1;
+ PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
+ hw->max_queue_pairs = 1;
}
- PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d hw->max_tx_queues=%d",
- hw->max_rx_queues, hw->max_tx_queues);
+ ret = virtio_alloc_queues(eth_dev);
+ if (ret < 0)
+ return ret;
+ vtpci_reinit_complete(hw);
+
if (pci_dev)
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
+ return 0;
+}
+
+/*
+ * This function is based on probe() function in virtio_pci.c
+ * It returns 0 on success.
+ */
+int
+eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct virtio_hw *hw = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev;
+ uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
+ int ret;
+
+ RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
+
+ eth_dev->dev_ops = &virtio_eth_dev_ops;
+ eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
+ rx_func_get(eth_dev);
+ return 0;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("virtio", VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ pci_dev = eth_dev->pci_dev;
+
+ if (pci_dev) {
+ ret = vtpci_init(pci_dev, hw, &dev_flags);
+ if (ret)
+ return ret;
+ }
+
+ eth_dev->data->dev_flags = dev_flags;
+
+ /* reset device and negotiate default features */
+ ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
+ if (ret < 0)
+ return ret;
+
/* Setup interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
rte_intr_callback_register(&pci_dev->intr_handle,
- virtio_interrupt_handler, eth_dev);
-
- virtio_dev_cq_start(eth_dev);
+ virtio_interrupt_handler, eth_dev);
return 0;
}
@@ -1266,26 +1344,20 @@ static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
- struct virtio_hw *hw = eth_dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return -EPERM;
- if (hw->started == 1) {
- virtio_dev_stop(eth_dev);
- virtio_dev_close(eth_dev);
- }
+ virtio_dev_stop(eth_dev);
+ virtio_dev_close(eth_dev);
pci_dev = eth_dev->pci_dev;
eth_dev->dev_ops = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;
- if (hw->cvq)
- virtio_dev_queue_release(hw->cvq->vq);
-
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -1303,32 +1375,29 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_virtio_pmd = {
.pci_drv = {
- .name = "rte_virtio_pmd",
+ .driver = {
+ .name = "net_virtio",
+ },
.id_table = pci_id_virtio_map,
.drv_flags = RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_virtio_dev_init,
.eth_dev_uninit = eth_virtio_dev_uninit,
.dev_private_size = sizeof(struct virtio_hw),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI virtio devices.
- * Returns 0 on success.
- */
-static int
-rte_virtio_pmd_init(const char *name __rte_unused,
- const char *param __rte_unused)
+RTE_INIT(rte_virtio_pmd_init);
+static void
+rte_virtio_pmd_init(void)
{
if (rte_eal_iopl_init() != 0) {
PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
- return -1;
+ return;
}
- rte_eth_driver_register(&rte_virtio_pmd);
- return 0;
+ rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
}
/*
@@ -1340,14 +1409,44 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint64_t req_features;
+ int ret;
PMD_INIT_LOG(DEBUG, "configure");
+ req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
+ if (rxmode->hw_ip_checksum)
+ req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
+ if (rxmode->enable_lro)
+ req_features |=
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+
+ /* if request features changed, reinit the device */
+ if (req_features != hw->req_guest_features) {
+ ret = virtio_init_device(dev, req_features);
+ if (ret < 0)
+ return ret;
+ }
- if (rxmode->hw_ip_checksum) {
- PMD_DRV_LOG(ERR, "HW IP checksum not supported");
- return -EINVAL;
+ if (rxmode->hw_ip_checksum &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ PMD_DRV_LOG(NOTICE,
+ "rx ip checksum not available on this host");
+ return -ENOTSUP;
+ }
+
+ if (rxmode->enable_lro &&
+ (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ PMD_DRV_LOG(NOTICE,
+ "lro not available on this host");
+ return -ENOTSUP;
}
+ /* start control queue */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
+ virtio_dev_cq_start(dev);
+
hw->vlan_strip = rxmode->hw_vlan_strip;
if (rxmode->hw_vlan_filter
@@ -1371,9 +1470,9 @@ static int
virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
- struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq __rte_unused;
+ struct virtio_hw *hw = dev->data->dev_private;
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
@@ -1391,29 +1490,19 @@ virtio_dev_start(struct rte_eth_dev *dev)
/* Initialize Link state */
virtio_dev_link_update(dev, 0);
- /* On restart after stop do not touch queues */
- if (hw->started)
- return 0;
-
- /* Do final configuration before rx/tx engine starts */
- virtio_dev_rxtx_start(dev);
- vtpci_reinit_complete(hw);
-
- hw->started = 1;
-
/*Notify the backend
*Otherwise the tap backend might already stop its queue due to fullness.
*vhost backend will have no chance to be waked up
*/
- nb_queues = dev->data->nb_rx_queues;
- if (nb_queues > 1) {
+ nb_queues = RTE_MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
+ if (hw->max_queue_pairs > 1) {
if (virtio_set_multiple_queues(dev, nb_queues) != 0)
return -EINVAL;
}
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
- for (i = 0; i < nb_queues; i++) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
virtqueue_notify(rxvq->vq);
}
@@ -1532,20 +1621,39 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ uint64_t tso_mask;
struct virtio_hw *hw = dev->data->dev_private;
if (dev->pci_dev)
- dev_info->driver_name = dev->driver->pci_drv.name;
+ dev_info->driver_name = dev->driver->pci_drv.driver.name;
else
dev_info->driver_name = "virtio_user PMD";
- dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
- dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
+ dev_info->max_rx_queues =
+ RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
+ dev_info->max_tx_queues =
+ RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_TX_QUEUES);
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
};
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO;
+ dev_info->tx_offload_capa = 0;
+
+ if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
+ dev_info->tx_offload_capa |=
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+ }
+
+ tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6);
+ if ((hw->guest_features & tso_mask) == tso_mask)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
/*
@@ -1559,10 +1667,5 @@ __rte_unused uint8_t is_rx)
return 0;
}
-static struct rte_driver rte_virtio_driver = {
- .type = PMD_PDEV,
- .init = rte_virtio_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_virtio_driver, virtio_net);
-DRIVER_REGISTER_PCI_TABLE(virtio_net, pci_id_virtio_map);
+RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
+RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 2ecec6eb..27d9a190 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -47,14 +47,14 @@
#define PAGE_SIZE 4096
#endif
-#define VIRTIO_MAX_RX_QUEUES 128
-#define VIRTIO_MAX_TX_QUEUES 128
+#define VIRTIO_MAX_RX_QUEUES 128U
+#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MAX_MAC_ADDRS 64
#define VIRTIO_MIN_RX_BUFSIZE 64
#define VIRTIO_MAX_RX_PKTLEN 9728
/* Features desired/implemented by this driver. */
-#define VIRTIO_PMD_GUEST_FEATURES \
+#define VIRTIO_PMD_DEFAULT_GUEST_FEATURES \
(1u << VIRTIO_NET_F_MAC | \
1u << VIRTIO_NET_F_STATUS | \
1u << VIRTIO_NET_F_MQ | \
@@ -62,8 +62,13 @@
1u << VIRTIO_NET_F_CTRL_VQ | \
1u << VIRTIO_NET_F_CTRL_RX | \
1u << VIRTIO_NET_F_CTRL_VLAN | \
+ 1u << VIRTIO_NET_F_CSUM | \
+ 1u << VIRTIO_NET_F_HOST_TSO4 | \
+ 1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
- 1ULL << VIRTIO_F_VERSION_1)
+ 1u << VIRTIO_RING_F_INDIRECT_DESC | \
+ 1ULL << VIRTIO_F_VERSION_1 | \
+ 1ULL << VIRTIO_F_IOMMU_PLATFORM)
/*
* CQ function prototype
@@ -73,31 +78,15 @@ void virtio_dev_cq_start(struct rte_eth_dev *dev);
/*
* RX/TX function prototypes
*/
-void virtio_dev_rxtx_start(struct rte_eth_dev *dev);
-
-int virtio_dev_queue_setup(struct rte_eth_dev *dev,
- int queue_type,
- uint16_t queue_idx,
- uint16_t vtpci_queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- void **pvq);
-
-void virtio_dev_queue_release(struct virtqueue *vq);
-
int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mb_pool);
-void virtio_dev_rx_queue_release(void *rxq);
-
int virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-void virtio_dev_tx_queue_release(void *txq);
-
uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -115,13 +104,4 @@ uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
-/*
- * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
- * frames larger than 1514 bytes. We do not yet support software LRO
- * via tcp_lro_rx().
- */
-#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
- VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)
-
-
#endif /* _VIRTIO_ETHDEV_H_ */
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index f1a7ca7e..9b47165d 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -745,8 +745,9 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
- (!dev->devargs ||
- dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)) {
+ (!dev->device.devargs ||
+ dev->device.devargs->type !=
+ RTE_DEVTYPE_WHITELISTED_PCI)) {
PMD_INIT_LOG(INFO,
"skip kernel managed virtio device.");
return 1;
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index dd7693fe..de271bfe 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -44,8 +44,8 @@ struct virtnet_ctl;
/* VirtIO PCI vendor/device ID. */
#define VIRTIO_PCI_VENDORID 0x1AF4
-#define VIRTIO_PCI_DEVICEID_MIN 0x1000
-#define VIRTIO_PCI_DEVICEID_MAX 0x103F
+#define VIRTIO_PCI_LEGACY_DEVICEID_NET 0x1000
+#define VIRTIO_PCI_MODERN_DEVICEID_NET 0x1041
/* VirtIO ABI version, this must match exactly. */
#define VIRTIO_PCI_ABI_VERSION 0
@@ -138,6 +138,7 @@ struct virtnet_ctl;
#define VIRTIO_RING_F_INDIRECT_DESC 28
#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
/*
* Some VirtIO feature bits (currently bits 28 through 31) are
@@ -145,7 +146,7 @@ struct virtnet_ctl;
* rest are per-device feature bits.
*/
#define VIRTIO_TRANSPORT_F_START 28
-#define VIRTIO_TRANSPORT_F_END 32
+#define VIRTIO_TRANSPORT_F_END 34
/* The Guest publishes the used index for which it expects an interrupt
* at the end of the avail ring. Host should ignore the avail->flags field. */
@@ -245,14 +246,14 @@ struct virtio_net_config;
struct virtio_hw {
struct virtnet_ctl *cvq;
struct rte_pci_ioport io;
+ uint64_t req_guest_features;
uint64_t guest_features;
- uint32_t max_tx_queues;
- uint32_t max_rx_queues;
+ uint32_t max_queue_pairs;
uint16_t vtnet_hdr_size;
uint8_t vlan_strip;
uint8_t use_msix;
- uint8_t started;
uint8_t modern;
+ uint8_t use_simple_rxtx;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
uint8_t *isr;
@@ -262,6 +263,8 @@ struct virtio_hw {
struct virtio_net_config *dev_cfg;
const struct virtio_pci_ops *vtpci_ops;
void *virtio_user_dev;
+
+ struct virtqueue **vqs;
};
/*
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 724517e2..22d97a4e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -50,6 +50,11 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
+#include <rte_cpuflags.h>
+#include <rte_net.h>
+#include <rte_ip.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
#include "virtio_logs.h"
#include "virtio_ethdev.h"
@@ -67,10 +72,6 @@
#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
-static int use_simple_rxtx;
-#endif
-
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
@@ -208,18 +209,70 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
return 0;
}
+/* When doing TSO, the IP length is not included in the pseudo header
+ * checksum of the packet given to the PMD, but for virtio it is
+ * expected.
+ */
+static void
+virtio_tso_fix_cksum(struct rte_mbuf *m)
+{
+ /* common case: header is not fragmented */
+ if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
+ m->l4_len)) {
+ struct ipv4_hdr *iph;
+ struct ipv6_hdr *ip6h;
+ struct tcp_hdr *th;
+ uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
+ uint32_t tmp;
+
+ iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+ th = RTE_PTR_ADD(iph, m->l3_len);
+ if ((iph->version_ihl >> 4) == 4) {
+ iph->hdr_checksum = 0;
+ iph->hdr_checksum = rte_ipv4_cksum(iph);
+ ip_len = iph->total_length;
+ ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
+ m->l3_len);
+ } else {
+ ip6h = (struct ipv6_hdr *)iph;
+ ip_paylen = ip6h->payload_len;
+ }
+
+ /* calculate the new phdr checksum not including ip_paylen */
+ prev_cksum = th->cksum;
+ tmp = prev_cksum;
+ tmp += ip_paylen;
+ tmp = (tmp & 0xffff) + (tmp >> 16);
+ new_cksum = tmp;
+
+ /* replace it in the packet */
+ th->cksum = new_cksum;
+ }
+}
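+/*
+ * Worked example of the fold above (illustrative values): if th->cksum
+ * holds a pseudo-header checksum of 0xfabc computed without the IP
+ * payload length, and ip_paylen is 0x0640, then 0xfabc + 0x0640 =
+ * 0x100fc, which folds to 0x00fd and becomes the new th->cksum that
+ * the virtio host expects before segmentation.
+ */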
+
+static inline int
+tx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push)
{
+ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
struct vq_desc_extra *dxp;
struct virtqueue *vq = txvq->vq;
struct vring_desc *start_dp;
uint16_t seg_num = cookie->nb_segs;
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
- unsigned long offs;
+ struct virtio_net_hdr *hdr;
+ int offload;
+ offload = tx_offload_enabled(vq->hw);
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
dxp = &vq->vq_descx[idx];
@@ -229,10 +282,12 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
start_dp = vq->vq_ring.desc;
if (can_push) {
- /* put on zero'd transmit header (no offloads) */
- void *hdr = rte_pktmbuf_prepend(cookie, head_size);
-
- memset(hdr, 0, head_size);
+ /* prepend cannot fail, checked by caller */
+ hdr = (struct virtio_net_hdr *)
+ rte_pktmbuf_prepend(cookie, head_size);
+ /* if offload disabled, it is not zeroed below, do it now */
+ if (offload == 0)
+ memset(hdr, 0, head_size);
} else if (use_indirect) {
/* setup tx ring slot to point to indirect
* descriptor list stored in reserved region.
@@ -240,14 +295,11 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
* the first slot in indirect ring is already preset
* to point to the header in reserved region
*/
- struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
-
- offs = idx * sizeof(struct virtio_tx_region)
- + offsetof(struct virtio_tx_region, tx_indir);
-
- start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs;
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
start_dp[idx].flags = VRING_DESC_F_INDIRECT;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
/* loop below will fill in rest of the indirect elements */
start_dp = txr[idx].tx_indir;
@@ -256,15 +308,59 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
/* setup first tx ring slot to point to header
* stored in reserved region.
*/
- offs = idx * sizeof(struct virtio_tx_region)
- + offsetof(struct virtio_tx_region, tx_hdr);
-
- start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs;
+ start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
start_dp[idx].len = vq->hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_NEXT;
+ hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
idx = start_dp[idx].next;
}
+ /* Checksum Offload / TSO */
+ if (offload) {
+ if (cookie->ol_flags & PKT_TX_TCP_SEG)
+ cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
+ switch (cookie->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_UDP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct udp_hdr,
+ dgram_cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ case PKT_TX_TCP_CKSUM:
+ hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ break;
+
+ default:
+ hdr->csum_start = 0;
+ hdr->csum_offset = 0;
+ hdr->flags = 0;
+ break;
+ }
+
+ /* TCP Segmentation Offload */
+ if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+ virtio_tso_fix_cksum(cookie);
+ hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ VIRTIO_NET_HDR_GSO_TCPV6 :
+ VIRTIO_NET_HDR_GSO_TCPV4;
+ hdr->gso_size = cookie->tso_segsz;
+ hdr->hdr_len =
+ cookie->l2_len +
+ cookie->l3_len +
+ cookie->l4_len;
+ } else {
+ hdr->gso_type = 0;
+ hdr->gso_size = 0;
+ hdr->hdr_len = 0;
+ }
+ }
+
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
@@ -282,207 +378,120 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
vq_update_avail_ring(vq, head_idx);
}
-static void
-virtio_dev_vring_start(struct virtqueue *vq)
-{
- int size = vq->vq_nentries;
- struct vring *vr = &vq->vq_ring;
- uint8_t *ring_mem = vq->vq_ring_virt_mem;
-
- PMD_INIT_FUNC_TRACE();
-
- /*
- * Reinitialise since virtio port might have been stopped and restarted
- */
- memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_used_cons_idx = 0;
- vq->vq_desc_head_idx = 0;
- vq->vq_avail_idx = 0;
- vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
- vq->vq_free_cnt = vq->vq_nentries;
- memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-
- vring_desc_init(vr->desc, size);
-
- /*
- * Disable device(host) interrupting guest
- */
- virtqueue_disable_intr(vq);
-}
-
void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
if (hw->cvq && hw->cvq->vq) {
- virtio_dev_vring_start(hw->cvq->vq);
VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
}
}
-void
-virtio_dev_rxtx_start(struct rte_eth_dev *dev)
+int
+virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id __rte_unused,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
{
- /*
- * Start receive and transmit vrings
- * - Setup vring structure for all queues
- * - Initialize descriptor for the rx vring
- * - Allocate blank mbufs for the each rx descriptor
- *
- */
- uint16_t i;
+ uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+ struct virtnet_rx *rxvq;
+ int error, nbufs;
+ struct rte_mbuf *m;
uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- /* Start rx vring. */
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtnet_rx *rxvq = dev->data->rx_queues[i];
- struct virtqueue *vq = rxvq->vq;
- int error, nbufs;
- struct rte_mbuf *m;
-
- virtio_dev_vring_start(vq);
- if (rxvq->mpool == NULL) {
- rte_exit(EXIT_FAILURE,
- "Cannot allocate mbufs for rx virtqueue");
- }
-
- /* Allocate blank mbufs for the each rx descriptor */
- nbufs = 0;
- error = ENOSPC;
-
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
- if (use_simple_rxtx) {
- for (desc_idx = 0; desc_idx < vq->vq_nentries;
- desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- vq->vq_ring.desc[desc_idx].flags =
- VRING_DESC_F_WRITE;
- }
- }
-#endif
- memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
- for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
- desc_idx++) {
- vq->sw_ring[vq->vq_nentries + desc_idx] =
- &rxvq->fake_mbuf;
- }
-
- while (!virtqueue_full(vq)) {
- m = rte_mbuf_raw_alloc(rxvq->mpool);
- if (m == NULL)
- break;
+ if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+ nb_desc = vq->vq_nentries;
+ vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
- /******************************************
- * Enqueue allocated buffers *
- *******************************************/
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
- if (use_simple_rxtx)
- error = virtqueue_enqueue_recv_refill_simple(vq, m);
- else
-#endif
- error = virtqueue_enqueue_recv_refill(vq, m);
- if (error) {
- rte_pktmbuf_free(m);
- break;
- }
- nbufs++;
- }
+ rxvq = &vq->rxq;
+ rxvq->queue_id = queue_idx;
+ rxvq->mpool = mp;
+ if (rxvq->mpool == NULL) {
+ rte_exit(EXIT_FAILURE,
+ "Cannot allocate mbufs for rx virtqueue");
+ }
+ dev->data->rx_queues[queue_idx] = rxvq;
- vq_update_avail_idx(vq);
- PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
+ /* Allocate blank mbufs for the each rx descriptor */
+ nbufs = 0;
+ error = ENOSPC;
- VIRTQUEUE_DUMP(vq);
+ if (hw->use_simple_rxtx) {
+ for (desc_idx = 0; desc_idx < vq->vq_nentries;
+ desc_idx++) {
+ vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+ vq->vq_ring.desc[desc_idx].flags =
+ VRING_DESC_F_WRITE;
+ }
}
- /* Start tx vring. */
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtnet_tx *txvq = dev->data->tx_queues[i];
- struct virtqueue *vq = txvq->vq;
-
- virtio_dev_vring_start(vq);
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
- if (use_simple_rxtx) {
- uint16_t mid_idx = vq->vq_nentries >> 1;
-
- for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
- vq->vq_ring.avail->ring[desc_idx] =
- desc_idx + mid_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].next =
- desc_idx;
- vq->vq_ring.desc[desc_idx + mid_idx].addr =
- txvq->virtio_net_hdr_mem +
- offsetof(struct virtio_tx_region, tx_hdr);
- vq->vq_ring.desc[desc_idx + mid_idx].len =
- vq->hw->vtnet_hdr_size;
- vq->vq_ring.desc[desc_idx + mid_idx].flags =
- VRING_DESC_F_NEXT;
- vq->vq_ring.desc[desc_idx].flags = 0;
- }
- for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
- desc_idx++)
- vq->vq_ring.avail->ring[desc_idx] = desc_idx;
- }
-#endif
- VIRTQUEUE_DUMP(vq);
+ memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+ for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
+ desc_idx++) {
+ vq->sw_ring[vq->vq_nentries + desc_idx] =
+ &rxvq->fake_mbuf;
}
-}
-int
-virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
-{
- uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
- struct virtnet_rx *rxvq;
- int ret;
+ while (!virtqueue_full(vq)) {
+ m = rte_mbuf_raw_alloc(rxvq->mpool);
+ if (m == NULL)
+ break;
- PMD_INIT_FUNC_TRACE();
- ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
- nb_desc, socket_id, (void **)&rxvq);
- if (ret < 0) {
- PMD_INIT_LOG(ERR, "rvq initialization failed");
- return ret;
+ /* Enqueue allocated buffers */
+ if (hw->use_simple_rxtx)
+ error = virtqueue_enqueue_recv_refill_simple(vq, m);
+ else
+ error = virtqueue_enqueue_recv_refill(vq, m);
+
+ if (error) {
+ rte_pktmbuf_free(m);
+ break;
+ }
+ nbufs++;
}
- /* Create mempool for rx mbuf allocation */
- rxvq->mpool = mp;
+ vq_update_avail_idx(vq);
- dev->data->rx_queues[queue_idx] = rxvq;
+ PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
virtio_rxq_vec_setup(rxvq);
-#endif
+
+ VIRTQUEUE_DUMP(vq);
return 0;
}
-void
-virtio_dev_rx_queue_release(void *rxq)
+static void
+virtio_update_rxtx_handler(struct rte_eth_dev *dev,
+ const struct rte_eth_txconf *tx_conf)
{
- struct virtnet_rx *rxvq = rxq;
- struct virtqueue *vq;
- const struct rte_memzone *mz;
-
- if (rxvq == NULL)
- return;
-
- /*
- * rxvq is freed when vq is freed, and as mz should be freed after the
- * del_queue, so we reserve the mz pointer first.
- */
- vq = rxvq->vq;
- mz = rxvq->mz;
+ uint8_t use_simple_rxtx = 0;
+ struct virtio_hw *hw = dev->data->dev_private;
- virtio_dev_queue_release(vq);
- rte_memzone_free(mz);
+#if defined RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
+ use_simple_rxtx = 1;
+#elif defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
+ use_simple_rxtx = 1;
+#endif
+ /* Use simple rx/tx func if single segment and no offloads */
+ if (use_simple_rxtx &&
+ (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO, "Using simple rx/tx path");
+ dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ hw->use_simple_rxtx = use_simple_rxtx;
+ }
}
/*
@@ -496,45 +505,26 @@ int
virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
- unsigned int socket_id,
+ unsigned int socket_id __rte_unused,
const struct rte_eth_txconf *tx_conf)
{
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
-
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
struct virtio_hw *hw = dev->data->dev_private;
-#endif
+ struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
struct virtnet_tx *txvq;
- struct virtqueue *vq;
uint16_t tx_free_thresh;
- int ret;
+ uint16_t desc_idx;
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
- != ETH_TXQ_FLAGS_NOXSUMS) {
- PMD_INIT_LOG(ERR, "TX checksum offload not supported\n");
- return -EINVAL;
- }
+ virtio_update_rxtx_handler(dev, tx_conf);
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
- /* Use simple rx/tx func if single segment and no offloads */
- if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- PMD_INIT_LOG(INFO, "Using simple rx/tx path");
- dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- dev->rx_pkt_burst = virtio_recv_pkts_vec;
- use_simple_rxtx = 1;
- }
-#endif
+ if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+ nb_desc = vq->vq_nentries;
+ vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
- ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
- nb_desc, socket_id, (void **)&txvq);
- if (ret < 0) {
- PMD_INIT_LOG(ERR, "tvq initialization failed");
- return ret;
- }
- vq = txvq->vq;
+ txvq = &vq->txq;
+ txvq->queue_id = queue_idx;
tx_free_thresh = tx_conf->tx_free_thresh;
if (tx_free_thresh == 0)
@@ -552,32 +542,32 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
vq->vq_free_thresh = tx_free_thresh;
- dev->data->tx_queues[queue_idx] = txvq;
- return 0;
-}
-
-void
-virtio_dev_tx_queue_release(void *txq)
-{
- struct virtnet_tx *txvq = txq;
- struct virtqueue *vq;
- const struct rte_memzone *mz;
- const struct rte_memzone *hdr_mz;
-
- if (txvq == NULL)
- return;
+ if (hw->use_simple_rxtx) {
+ uint16_t mid_idx = vq->vq_nentries >> 1;
+
+ for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
+ vq->vq_ring.avail->ring[desc_idx] =
+ desc_idx + mid_idx;
+ vq->vq_ring.desc[desc_idx + mid_idx].next =
+ desc_idx;
+ vq->vq_ring.desc[desc_idx + mid_idx].addr =
+ txvq->virtio_net_hdr_mem +
+ offsetof(struct virtio_tx_region, tx_hdr);
+ vq->vq_ring.desc[desc_idx + mid_idx].len =
+ vq->hw->vtnet_hdr_size;
+ vq->vq_ring.desc[desc_idx + mid_idx].flags =
+ VRING_DESC_F_NEXT;
+ vq->vq_ring.desc[desc_idx].flags = 0;
+ }
+ for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
+ desc_idx++)
+ vq->vq_ring.avail->ring[desc_idx] = desc_idx;
+ }
- /*
- * txvq is freed when vq is freed, and as mz should be freed after the
- * del_queue, so we reserve the mz pointer first.
- */
- vq = txvq->vq;
- mz = txvq->mz;
- hdr_mz = txvq->virtio_net_hdr_mz;
+ VIRTQUEUE_DUMP(vq);
- virtio_dev_queue_release(vq);
- rte_memzone_free(mz);
- rte_memzone_free(hdr_mz);
+ dev->data->tx_queues[queue_idx] = txvq;
+ return 0;
}
static void
@@ -627,6 +617,86 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
}
}
+/* Optionally fill offload information in structure */
+static int
+virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
+{
+ struct rte_net_hdr_lens hdr_lens;
+ uint32_t hdrlen, ptype;
+ int l4_supported = 0;
+
+ /* nothing to do */
+ if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
+ return 0;
+
+ m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+
+ ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
+ m->packet_type = ptype;
+ if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
+ (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
+ (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
+ l4_supported = 1;
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
+ if (hdr->csum_start <= hdrlen && l4_supported) {
+ m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ } else {
+ /* Unknown proto or tunnel, do sw cksum. We can assume
+ * the cksum field is in the first segment since the
+ * buffers we provided to the host are large enough.
+ * In case of SCTP, this will be wrong since it's a CRC
+ * but there's nothing we can do.
+ */
+ uint16_t csum, off;
+
+ rte_raw_cksum_mbuf(m, hdr->csum_start,
+ rte_pktmbuf_pkt_len(m) - hdr->csum_start,
+ &csum);
+ if (likely(csum != 0xffff))
+ csum = ~csum;
+ off = hdr->csum_offset + hdr->csum_start;
+ if (rte_pktmbuf_data_len(m) >= off + 1)
+ *rte_pktmbuf_mtod_offset(m, uint16_t *,
+ off) = csum;
+ }
+ } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ /* GSO request, save required information in mbuf */
+ if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ /* Check unsupported modes */
+ if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
+ (hdr->gso_size == 0)) {
+ return -EINVAL;
+ }
+
+ /* Update mss length in mbuf */
+ m->tso_segsz = hdr->gso_size;
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ m->ol_flags |= PKT_RX_LRO | \
+ PKT_RX_L4_CKSUM_NONE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+rx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
@@ -642,6 +712,8 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
int error;
uint32_t i, nb_enqueued;
uint32_t hdr_size;
+ int offload;
+ struct virtio_net_hdr *hdr;
nb_used = VIRTQUEUE_NUSED(vq);
@@ -659,6 +731,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_rx = 0;
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
+ offload = rx_offload_enabled(hw);
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
@@ -683,9 +756,18 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
+ hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+ RTE_PKTMBUF_HEADROOM - hdr_size);
+
if (hw->vlan_strip)
rte_vlan_strip(rxm);
+ if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
rx_pkts[nb_rx++] = rxm;
@@ -745,6 +827,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
+ int offload;
nb_used = VIRTQUEUE_NUSED(vq);
@@ -760,6 +843,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
extra_idx = 0;
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
+ offload = rx_offload_enabled(hw);
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
@@ -805,6 +889,12 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rx_pkts[nb_rx] = rxm;
prev = rxm;
+ if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+ virtio_discard_rxbuf(vq, rxm);
+ rxvq->stats.errors++;
+ continue;
+ }
+
seg_res = seg_num - 1;
while (seg_res != 0) {
diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h
index 058b56a1..28f82d6a 100644
--- a/drivers/net/virtio/virtio_rxtx.h
+++ b/drivers/net/virtio/virtio_rxtx.h
@@ -86,10 +86,9 @@ struct virtnet_ctl {
const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
};
-#ifdef RTE_MACHINE_CPUFLAG_SSSE3
int virtio_rxq_vec_setup(struct virtnet_rx *rxvq);
int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
struct rte_mbuf *m);
-#endif
+
#endif /* _VIRTIO_RXTX_H_ */
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 6517aa80..b651e53b 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -37,8 +37,6 @@
#include <string.h>
#include <errno.h>
-#include <tmmintrin.h>
-
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memzone.h>
@@ -53,14 +51,7 @@
#include <rte_errno.h>
#include <rte_byteorder.h>
-#include "virtio_logs.h"
-#include "virtio_ethdev.h"
-#include "virtqueue.h"
-#include "virtio_rxtx.h"
-
-#define RTE_VIRTIO_VPMD_RX_BURST 32
-#define RTE_VIRTIO_DESC_PER_LOOP 8
-#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
+#include "virtio_rxtx_simple.h"
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wcast-qual"
@@ -92,257 +83,6 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
return 0;
}
-static inline void
-virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
-{
- int i;
- uint16_t desc_idx;
- struct rte_mbuf **sw_ring;
- struct vring_desc *start_dp;
- int ret;
- struct virtqueue *vq = rxvq->vq;
-
- desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
- sw_ring = &vq->sw_ring[desc_idx];
- start_dp = &vq->vq_ring.desc[desc_idx];
-
- ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
- RTE_VIRTIO_VPMD_RX_REARM_THRESH);
- if (unlikely(ret)) {
- rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
- RTE_VIRTIO_VPMD_RX_REARM_THRESH;
- return;
- }
-
- for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
- uintptr_t p;
-
- p = (uintptr_t)&sw_ring[i]->rearm_data;
- *(uint64_t *)p = rxvq->mbuf_initializer;
-
- start_dp[i].addr =
- VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
- RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
- start_dp[i].len = sw_ring[i]->buf_len -
- RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
- }
-
- vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
- vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
- vq_update_avail_idx(vq);
-}
-
-/* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
- *
- * This routine is for non-mergeable RX, one desc for each guest buffer.
- * This routine is based on the RX ring layout optimization. Each entry in the
- * avail ring points to the desc with the same index in the desc ring and this
- * will never be changed in the driver.
- *
- * - nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, just return no packet
- */
-uint16_t
-virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
-{
- struct virtnet_rx *rxvq = rx_queue;
- struct virtqueue *vq = rxvq->vq;
- uint16_t nb_used;
- uint16_t desc_idx;
- struct vring_used_elem *rused;
- struct rte_mbuf **sw_ring;
- struct rte_mbuf **sw_ring_end;
- uint16_t nb_pkts_received;
- __m128i shuf_msk1, shuf_msk2, len_adjust;
-
- shuf_msk1 = _mm_set_epi8(
- 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, /* vlan tci */
- 5, 4, /* dat len */
- 0xFF, 0xFF, 5, 4, /* pkt len */
- 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
-
- );
-
- shuf_msk2 = _mm_set_epi8(
- 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, /* vlan tci */
- 13, 12, /* dat len */
- 0xFF, 0xFF, 13, 12, /* pkt len */
- 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
- );
-
- /* Subtract the header length.
- * In which case do we need the header length in used->len ?
- */
- len_adjust = _mm_set_epi16(
- 0, 0,
- 0,
- (uint16_t)-vq->hw->vtnet_hdr_size,
- 0, (uint16_t)-vq->hw->vtnet_hdr_size,
- 0, 0);
-
- if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
- return 0;
-
- nb_used = VIRTQUEUE_NUSED(vq);
-
- rte_compiler_barrier();
-
- if (unlikely(nb_used == 0))
- return 0;
-
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
- nb_used = RTE_MIN(nb_used, nb_pkts);
-
- desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- rused = &vq->vq_ring.used->ring[desc_idx];
- sw_ring = &vq->sw_ring[desc_idx];
- sw_ring_end = &vq->sw_ring[vq->vq_nentries];
-
- _mm_prefetch((const void *)rused, _MM_HINT_T0);
-
- if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
- virtio_rxq_rearm_vec(rxvq);
- if (unlikely(virtqueue_kick_prepare(vq)))
- virtqueue_notify(vq);
- }
-
- for (nb_pkts_received = 0;
- nb_pkts_received < nb_used;) {
- __m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
- __m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
- __m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
-
- mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
- desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
- _mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);
-
- mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
- desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
- _mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);
-
- mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
- desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
- _mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);
-
- mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
- desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
- _mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
-
- pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
- pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
- pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
- pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
- _mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
- pkt_mb[1]);
- _mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
- pkt_mb[0]);
-
- pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
- pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
- pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
- pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
- _mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
- pkt_mb[3]);
- _mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
- pkt_mb[2]);
-
- pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
- pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
- pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
- pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
- _mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
- pkt_mb[5]);
- _mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
- pkt_mb[4]);
-
- pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
- pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
- pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
- pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
- _mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
- pkt_mb[7]);
- _mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
- pkt_mb[6]);
-
- if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
- if (sw_ring + nb_used <= sw_ring_end)
- nb_pkts_received += nb_used;
- else
- nb_pkts_received += sw_ring_end - sw_ring;
- break;
- } else {
- if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
- sw_ring_end)) {
- nb_pkts_received += sw_ring_end - sw_ring;
- break;
- } else {
- nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;
-
- rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
- sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
- rused += RTE_VIRTIO_DESC_PER_LOOP;
- nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
- }
- }
- }
-
- vq->vq_used_cons_idx += nb_pkts_received;
- vq->vq_free_cnt += nb_pkts_received;
- rxvq->stats.packets += nb_pkts_received;
- return nb_pkts_received;
-}
-
-#define VIRTIO_TX_FREE_THRESH 32
-#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
-#define VIRTIO_TX_FREE_NR 32
-/* TODO: vq->tx_free_cnt could mean num of free slots so we could avoid shift */
-static inline void
-virtio_xmit_cleanup(struct virtqueue *vq)
-{
- uint16_t i, desc_idx;
- uint32_t nb_free = 0;
- struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
-
- desc_idx = (uint16_t)(vq->vq_used_cons_idx &
- ((vq->vq_nentries >> 1) - 1));
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- free[0] = m;
- nb_free = 1;
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
- if (likely(m != NULL)) {
- if (likely(m->pool == free[0]->pool))
- free[nb_free++] = m;
- else {
- rte_mempool_put_bulk(free[0]->pool,
- (void **)free,
- RTE_MIN(RTE_DIM(free),
- nb_free));
- free[0] = m;
- nb_free = 1;
- }
- }
- }
- rte_mempool_put_bulk(free[0]->pool, (void **)free,
- RTE_MIN(RTE_DIM(free), nb_free));
- } else {
- for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
- m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
- if (m != NULL)
- rte_mempool_put(m->pool, m);
- }
- }
-
- vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
- vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
-}
-
uint16_t
virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
@@ -423,3 +163,13 @@ virtio_rxq_vec_setup(struct virtnet_rx *rxq)
return 0;
}
+
+/* Stub for linkage when arch specific implementation is not available */
+uint16_t __attribute__((weak))
+virtio_recv_pkts_vec(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ rte_panic("Wrong weak function linked by linker\n");
+ return 0;
+}
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
new file mode 100644
index 00000000..b08f8594
--- /dev/null
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -0,0 +1,136 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _VIRTIO_RXTX_SIMPLE_H_
+#define _VIRTIO_RXTX_SIMPLE_H_
+
+#include <stdint.h>
+
+#include "virtio_logs.h"
+#include "virtio_ethdev.h"
+#include "virtqueue.h"
+#include "virtio_rxtx.h"
+
+#define RTE_VIRTIO_VPMD_RX_BURST 32
+#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
+
+static inline void
+virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
+{
+ int i;
+ uint16_t desc_idx;
+ struct rte_mbuf **sw_ring;
+ struct vring_desc *start_dp;
+ int ret;
+ struct virtqueue *vq = rxvq->vq;
+
+ desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
+ sw_ring = &vq->sw_ring[desc_idx];
+ start_dp = &vq->vq_ring.desc[desc_idx];
+
+ ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH);
+ if (unlikely(ret)) {
+ rte_eth_devices[rxvq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ return;
+ }
+
+ for (i = 0; i < RTE_VIRTIO_VPMD_RX_REARM_THRESH; i++) {
+ uintptr_t p;
+
+ p = (uintptr_t)&sw_ring[i]->rearm_data;
+ *(uint64_t *)p = rxvq->mbuf_initializer;
+
+ start_dp[i].addr =
+ VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+ RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
+ start_dp[i].len = sw_ring[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
+ }
+
+ vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+ vq_update_avail_idx(vq);
+}
+
+#define VIRTIO_TX_FREE_THRESH 32
+#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
+#define VIRTIO_TX_FREE_NR 32
+/* TODO: vq->tx_free_cnt could track the number of free slots directly, avoiding the shift below */
+static inline void
+virtio_xmit_cleanup(struct virtqueue *vq)
+{
+ uint16_t i, desc_idx;
+ uint32_t nb_free = 0;
+ struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
+
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx &
+ ((vq->vq_nentries >> 1) - 1));
+ m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+ m = __rte_pktmbuf_prefree_seg(m);
+ if (likely(m != NULL)) {
+ free[0] = m;
+ nb_free = 1;
+ for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
+ m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+ m = __rte_pktmbuf_prefree_seg(m);
+ if (likely(m != NULL)) {
+ if (likely(m->pool == free[0]->pool))
+ free[nb_free++] = m;
+ else {
+ rte_mempool_put_bulk(free[0]->pool,
+ (void **)free,
+ RTE_MIN(RTE_DIM(free),
+ nb_free));
+ free[0] = m;
+ nb_free = 1;
+ }
+ }
+ }
+ rte_mempool_put_bulk(free[0]->pool, (void **)free,
+ RTE_MIN(RTE_DIM(free), nb_free));
+ } else {
+ for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
+ m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+ m = __rte_pktmbuf_prefree_seg(m);
+ if (m != NULL)
+ rte_mempool_put(m->pool, m);
+ }
+ }
+
+ vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
+ vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
+}
+
+#endif /* _VIRTIO_RXTX_SIMPLE_H_ */
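Both vector receive paths below share the rearm helper above: once at least RTE_VIRTIO_VPMD_RX_REARM_THRESH descriptors are free, a single rte_mempool_get_bulk() call refills the whole batch and the avail index is published once. A condensed view of the call site, mirroring the code added below (illustrative only):

    if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
            virtio_rxq_rearm_vec(rxvq);            /* bulk-allocate and post 32 buffers */
            if (unlikely(virtqueue_kick_prepare(vq)))
                    virtqueue_notify(vq);          /* notify the host only when required */
    }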
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
new file mode 100644
index 00000000..793eefbe
--- /dev/null
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -0,0 +1,235 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_vect.h>
+
+#include "virtio_rxtx_simple.h"
+
+#define RTE_VIRTIO_VPMD_RX_BURST 32
+#define RTE_VIRTIO_DESC_PER_LOOP 8
+#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
+
+/* virtio vPMD receive routine; only accepts bursts of nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP
+ *
+ * This routine is for non-mergeable RX, one desc for each guest buffer.
+ * This routine is based on the RX ring layout optimization. Each entry in the
+ * avail ring points to the desc with the same index in the desc ring and this
+ * will never be changed in the driver.
+ *
+ * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned
+ */
+uint16_t
+virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ uint16_t nb_used;
+ uint16_t desc_idx;
+ struct vring_used_elem *rused;
+ struct rte_mbuf **sw_ring;
+ struct rte_mbuf **sw_ring_end;
+ uint16_t nb_pkts_received;
+
+ uint8x16_t shuf_msk1 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
+ 4, 5, 0xFF, 0xFF, /* pkt len */
+ 4, 5, /* dat len */
+ 0xFF, 0xFF, /* vlan tci */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ uint8x16_t shuf_msk2 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
+ 12, 13, 0xFF, 0xFF, /* pkt len */
+ 12, 13, /* dat len */
+ 0xFF, 0xFF, /* vlan tci */
+ 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ /* Subtract the header length.
+ * In which case do we need the header length in used->len?
+ */
+ uint16x8_t len_adjust = {
+ 0, 0,
+ (uint16_t)vq->hw->vtnet_hdr_size, 0,
+ (uint16_t)vq->hw->vtnet_hdr_size,
+ 0,
+ 0, 0
+ };
+
+ if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
+ return 0;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ rte_rmb();
+
+ if (unlikely(nb_used == 0))
+ return 0;
+
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ rused = &vq->vq_ring.used->ring[desc_idx];
+ sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+
+ rte_prefetch_non_temporal(rused);
+
+ if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ if (unlikely(virtqueue_kick_prepare(vq)))
+ virtqueue_notify(vq);
+ }
+
+ for (nb_pkts_received = 0;
+ nb_pkts_received < nb_used;) {
+ uint64x2_t desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ uint64x2_t mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ uint64x2_t pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+
+ mbp[0] = vld1q_u64((uint64_t *)(sw_ring + 0));
+ desc[0] = vld1q_u64((uint64_t *)(rused + 0));
+ vst1q_u64((uint64_t *)&rx_pkts[0], mbp[0]);
+
+ mbp[1] = vld1q_u64((uint64_t *)(sw_ring + 2));
+ desc[1] = vld1q_u64((uint64_t *)(rused + 2));
+ vst1q_u64((uint64_t *)&rx_pkts[2], mbp[1]);
+
+ mbp[2] = vld1q_u64((uint64_t *)(sw_ring + 4));
+ desc[2] = vld1q_u64((uint64_t *)(rused + 4));
+ vst1q_u64((uint64_t *)&rx_pkts[4], mbp[2]);
+
+ mbp[3] = vld1q_u64((uint64_t *)(sw_ring + 6));
+ desc[3] = vld1q_u64((uint64_t *)(rused + 6));
+ vst1q_u64((uint64_t *)&rx_pkts[6], mbp[3]);
+
+ pkt_mb[1] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[0]), shuf_msk2));
+ pkt_mb[0] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[0]), shuf_msk1));
+ pkt_mb[1] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[1]), len_adjust));
+ pkt_mb[0] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[0]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[1]->rx_descriptor_fields1,
+ pkt_mb[1]);
+ vst1q_u64((void *)&rx_pkts[0]->rx_descriptor_fields1,
+ pkt_mb[0]);
+
+ pkt_mb[3] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[1]), shuf_msk2));
+ pkt_mb[2] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[1]), shuf_msk1));
+ pkt_mb[3] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[3]), len_adjust));
+ pkt_mb[2] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[2]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[3]->rx_descriptor_fields1,
+ pkt_mb[3]);
+ vst1q_u64((void *)&rx_pkts[2]->rx_descriptor_fields1,
+ pkt_mb[2]);
+
+ pkt_mb[5] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[2]), shuf_msk2));
+ pkt_mb[4] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[2]), shuf_msk1));
+ pkt_mb[5] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[5]), len_adjust));
+ pkt_mb[4] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[4]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[5]->rx_descriptor_fields1,
+ pkt_mb[5]);
+ vst1q_u64((void *)&rx_pkts[4]->rx_descriptor_fields1,
+ pkt_mb[4]);
+
+ pkt_mb[7] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[3]), shuf_msk2));
+ pkt_mb[6] = vreinterpretq_u64_u8(vqtbl1q_u8(
+ vreinterpretq_u8_u64(desc[3]), shuf_msk1));
+ pkt_mb[7] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[7]), len_adjust));
+ pkt_mb[6] = vreinterpretq_u64_u16(vsubq_u16(
+ vreinterpretq_u16_u64(pkt_mb[6]), len_adjust));
+ vst1q_u64((void *)&rx_pkts[7]->rx_descriptor_fields1,
+ pkt_mb[7]);
+ vst1q_u64((void *)&rx_pkts[6]->rx_descriptor_fields1,
+ pkt_mb[6]);
+
+ if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
+ if (sw_ring + nb_used <= sw_ring_end)
+ nb_pkts_received += nb_used;
+ else
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
+ sw_ring_end)) {
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;
+
+ rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
+ sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
+ rused += RTE_VIRTIO_DESC_PER_LOOP;
+ nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
+ }
+ }
+ }
+
+ vq->vq_used_cons_idx += nb_pkts_received;
+ vq->vq_free_cnt += nb_pkts_received;
+ rxvq->stats.packets += nb_pkts_received;
+ return nb_pkts_received;
+}
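The routine above only becomes the port's receive callback when the simple (in-order, non-mergeable) ring layout is in use; a hedged sketch of that selection, assuming hw->use_simple_rxtx has been set during queue setup (the actual assignment is not part of this hunk):

    /* sketch only: choose the burst callback for the port */
    if (hw->use_simple_rxtx)
            eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
    else
            eth_dev->rx_pkt_burst = virtio_recv_pkts;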
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
new file mode 100644
index 00000000..87bb5c63
--- /dev/null
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -0,0 +1,222 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <tmmintrin.h>
+
+#include <rte_byteorder.h>
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+
+#include "virtio_rxtx_simple.h"
+
+#define RTE_VIRTIO_VPMD_RX_BURST 32
+#define RTE_VIRTIO_DESC_PER_LOOP 8
+#define RTE_VIRTIO_VPMD_RX_REARM_THRESH RTE_VIRTIO_VPMD_RX_BURST
+
+/* virtio vPMD receive routine; only accepts bursts of nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP
+ *
+ * This routine is for non-mergeable RX, one desc for each guest buffer.
+ * This routine is based on the RX ring layout optimization. Each entry in the
+ * avail ring points to the desc with the same index in the desc ring and this
+ * will never be changed in the driver.
+ *
+ * - if nb_pkts < RTE_VIRTIO_DESC_PER_LOOP, no packets are returned
+ */
+uint16_t
+virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_rx *rxvq = rx_queue;
+ struct virtqueue *vq = rxvq->vq;
+ uint16_t nb_used;
+ uint16_t desc_idx;
+ struct vring_used_elem *rused;
+ struct rte_mbuf **sw_ring;
+ struct rte_mbuf **sw_ring_end;
+ uint16_t nb_pkts_received;
+ __m128i shuf_msk1, shuf_msk2, len_adjust;
+
+ shuf_msk1 = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, /* vlan tci */
+ 5, 4, /* dat len */
+ 0xFF, 0xFF, 5, 4, /* pkt len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
+
+ );
+
+ shuf_msk2 = _mm_set_epi8(
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, /* vlan tci */
+ 13, 12, /* dat len */
+ 0xFF, 0xFF, 13, 12, /* pkt len */
+ 0xFF, 0xFF, 0xFF, 0xFF /* packet type */
+ );
+
+ /* Subtract the header length.
+ * In which case do we need the header length in used->len?
+ */
+ len_adjust = _mm_set_epi16(
+ 0, 0,
+ 0,
+ (uint16_t)-vq->hw->vtnet_hdr_size,
+ 0, (uint16_t)-vq->hw->vtnet_hdr_size,
+ 0, 0);
+
+ if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
+ return 0;
+
+ nb_used = VIRTQUEUE_NUSED(vq);
+
+ rte_compiler_barrier();
+
+ if (unlikely(nb_used == 0))
+ return 0;
+
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
+ nb_used = RTE_MIN(nb_used, nb_pkts);
+
+ desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ rused = &vq->vq_ring.used->ring[desc_idx];
+ sw_ring = &vq->sw_ring[desc_idx];
+ sw_ring_end = &vq->sw_ring[vq->vq_nentries];
+
+ rte_prefetch0(rused);
+
+ if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+ virtio_rxq_rearm_vec(rxvq);
+ if (unlikely(virtqueue_kick_prepare(vq)))
+ virtqueue_notify(vq);
+ }
+
+ for (nb_pkts_received = 0;
+ nb_pkts_received < nb_used;) {
+ __m128i desc[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ __m128i mbp[RTE_VIRTIO_DESC_PER_LOOP / 2];
+ __m128i pkt_mb[RTE_VIRTIO_DESC_PER_LOOP];
+
+ mbp[0] = _mm_loadu_si128((__m128i *)(sw_ring + 0));
+ desc[0] = _mm_loadu_si128((__m128i *)(rused + 0));
+ _mm_storeu_si128((__m128i *)&rx_pkts[0], mbp[0]);
+
+ mbp[1] = _mm_loadu_si128((__m128i *)(sw_ring + 2));
+ desc[1] = _mm_loadu_si128((__m128i *)(rused + 2));
+ _mm_storeu_si128((__m128i *)&rx_pkts[2], mbp[1]);
+
+ mbp[2] = _mm_loadu_si128((__m128i *)(sw_ring + 4));
+ desc[2] = _mm_loadu_si128((__m128i *)(rused + 4));
+ _mm_storeu_si128((__m128i *)&rx_pkts[4], mbp[2]);
+
+ mbp[3] = _mm_loadu_si128((__m128i *)(sw_ring + 6));
+ desc[3] = _mm_loadu_si128((__m128i *)(rused + 6));
+ _mm_storeu_si128((__m128i *)&rx_pkts[6], mbp[3]);
+
+ pkt_mb[1] = _mm_shuffle_epi8(desc[0], shuf_msk2);
+ pkt_mb[0] = _mm_shuffle_epi8(desc[0], shuf_msk1);
+ pkt_mb[1] = _mm_add_epi16(pkt_mb[1], len_adjust);
+ pkt_mb[0] = _mm_add_epi16(pkt_mb[0], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[1]->rx_descriptor_fields1,
+ pkt_mb[1]);
+ _mm_storeu_si128((void *)&rx_pkts[0]->rx_descriptor_fields1,
+ pkt_mb[0]);
+
+ pkt_mb[3] = _mm_shuffle_epi8(desc[1], shuf_msk2);
+ pkt_mb[2] = _mm_shuffle_epi8(desc[1], shuf_msk1);
+ pkt_mb[3] = _mm_add_epi16(pkt_mb[3], len_adjust);
+ pkt_mb[2] = _mm_add_epi16(pkt_mb[2], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[3]->rx_descriptor_fields1,
+ pkt_mb[3]);
+ _mm_storeu_si128((void *)&rx_pkts[2]->rx_descriptor_fields1,
+ pkt_mb[2]);
+
+ pkt_mb[5] = _mm_shuffle_epi8(desc[2], shuf_msk2);
+ pkt_mb[4] = _mm_shuffle_epi8(desc[2], shuf_msk1);
+ pkt_mb[5] = _mm_add_epi16(pkt_mb[5], len_adjust);
+ pkt_mb[4] = _mm_add_epi16(pkt_mb[4], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[5]->rx_descriptor_fields1,
+ pkt_mb[5]);
+ _mm_storeu_si128((void *)&rx_pkts[4]->rx_descriptor_fields1,
+ pkt_mb[4]);
+
+ pkt_mb[7] = _mm_shuffle_epi8(desc[3], shuf_msk2);
+ pkt_mb[6] = _mm_shuffle_epi8(desc[3], shuf_msk1);
+ pkt_mb[7] = _mm_add_epi16(pkt_mb[7], len_adjust);
+ pkt_mb[6] = _mm_add_epi16(pkt_mb[6], len_adjust);
+ _mm_storeu_si128((void *)&rx_pkts[7]->rx_descriptor_fields1,
+ pkt_mb[7]);
+ _mm_storeu_si128((void *)&rx_pkts[6]->rx_descriptor_fields1,
+ pkt_mb[6]);
+
+ if (unlikely(nb_used <= RTE_VIRTIO_DESC_PER_LOOP)) {
+ if (sw_ring + nb_used <= sw_ring_end)
+ nb_pkts_received += nb_used;
+ else
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ if (unlikely(sw_ring + RTE_VIRTIO_DESC_PER_LOOP >=
+ sw_ring_end)) {
+ nb_pkts_received += sw_ring_end - sw_ring;
+ break;
+ } else {
+ nb_pkts_received += RTE_VIRTIO_DESC_PER_LOOP;
+
+ rx_pkts += RTE_VIRTIO_DESC_PER_LOOP;
+ sw_ring += RTE_VIRTIO_DESC_PER_LOOP;
+ rused += RTE_VIRTIO_DESC_PER_LOOP;
+ nb_used -= RTE_VIRTIO_DESC_PER_LOOP;
+ }
+ }
+ }
+
+ vq->vq_used_cons_idx += nb_pkts_received;
+ vq->vq_free_cnt += nb_pkts_received;
+ rxvq->stats.packets += nb_pkts_received;
+ return nb_pkts_received;
+}
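Each shuffle/add pair above is a vectorized field copy: the 16-bit length taken from the used-ring element is written to both pkt_len and data_len of the mbuf, with the virtio-net header size subtracted. A scalar equivalent for a single descriptor (illustrative only):

    uint32_t len = rused[i].len - vq->hw->vtnet_hdr_size;  /* strip the virtio-net header */

    rx_pkts[i]->pkt_len  = len;
    rx_pkts[i]->data_len = (uint16_t)len;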
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index bba74028..406beeac 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -37,6 +37,7 @@
#include <rte_malloc.h>
#include <rte_kvargs.h>
+#include <rte_vdev.h>
#include "virtio_ethdev.h"
#include "virtio_logs.h"
@@ -277,7 +278,7 @@ virtio_user_eth_dev_alloc(const char *name)
struct virtio_hw *hw;
struct virtio_user_dev *dev;
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(name);
if (!eth_dev) {
PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
return NULL;
@@ -303,6 +304,7 @@ virtio_user_eth_dev_alloc(const char *name)
hw->vtpci_ops = &virtio_user_ops;
hw->use_msix = 0;
hw->modern = 0;
+ hw->use_simple_rxtx = 0;
hw->virtio_user_dev = dev;
data->dev_private = hw;
data->numa_node = SOCKET_ID_ANY;
@@ -329,7 +331,7 @@ virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
* Returns 0 on success.
*/
static int
-virtio_user_pmd_devinit(const char *name, const char *params)
+virtio_user_pmd_probe(const char *name, const char *params)
{
struct rte_kvargs *kvlist = NULL;
struct rte_eth_dev *eth_dev;
@@ -443,7 +445,7 @@ end:
/** Called by rte_eth_dev_detach() */
static int
-virtio_user_pmd_devuninit(const char *name)
+virtio_user_pmd_remove(const char *name)
{
struct rte_eth_dev *eth_dev;
struct virtio_hw *hw;
@@ -471,14 +473,14 @@ virtio_user_pmd_devuninit(const char *name)
return 0;
}
-static struct rte_driver virtio_user_driver = {
- .type = PMD_VDEV,
- .init = virtio_user_pmd_devinit,
- .uninit = virtio_user_pmd_devuninit,
+static struct rte_vdev_driver virtio_user_driver = {
+ .probe = virtio_user_pmd_probe,
+ .remove = virtio_user_pmd_remove,
};
-PMD_REGISTER_DRIVER(virtio_user_driver, virtio_user);
-DRIVER_REGISTER_PARAM_STRING(virtio_user,
+RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
+RTE_PMD_REGISTER_ALIAS(net_virtio_user, virtio_user);
+RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"path=<path> "
"mac=<mac addr> "
"cq=<int> "
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 6737b81d..f0bb0899 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -44,6 +44,7 @@
#include "virtio_pci.h"
#include "virtio_ring.h"
#include "virtio_logs.h"
+#include "virtio_rxtx.h"
struct rte_mbuf;
@@ -191,6 +192,12 @@ struct virtqueue {
void *vq_ring_virt_mem; /**< linear address of vring*/
unsigned int vq_ring_size;
+ union {
+ struct virtnet_rx rxq;
+ struct virtnet_tx txq;
+ struct virtnet_ctl cq;
+ };
+
phys_addr_t vq_ring_mem; /**< physical address of vring,
* or virtual address for virtio_user. */
@@ -204,7 +211,6 @@ struct virtqueue {
uint16_t vq_queue_index; /**< PCI queue index */
uint16_t offset; /**< relative offset to obtain addr in mbuf */
uint16_t *notify_addr;
- int configured;
struct rte_mbuf **sw_ring; /**< RX software ring. */
struct vq_desc_extra vq_descx[0];
};
@@ -223,6 +229,7 @@ struct virtqueue {
*/
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /**< Use csum_start,csum_offset*/
+#define VIRTIO_NET_HDR_F_DATA_VALID 2 /**< Checksum is valid */
uint8_t flags;
#define VIRTIO_NET_HDR_GSO_NONE 0 /**< Not a GSO frame */
#define VIRTIO_NET_HDR_GSO_TCPV4 1 /**< GSO frame, IPv4 TCP (TSO) */
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 58742153..8bb13e52 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -81,11 +81,11 @@ static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete);
+ int wait_to_complete);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *stats);
+ struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+ struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
@@ -118,7 +118,7 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
.link_update = vmxnet3_dev_link_update,
.stats_get = vmxnet3_dev_stats_get,
- .mac_addr_set = vmxnet3_mac_addr_set,
+ .mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
@@ -131,20 +131,27 @@ static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
- const char *post_string, int socket_id, uint16_t align)
+ const char *post_string, int socket_id,
+ uint16_t align, bool reuse)
{
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->driver->pci_drv.name, dev->data->port_id, post_string);
+ dev->driver->pci_drv.driver.name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
+ if (!reuse) {
+ if (mz)
+ rte_memzone_free(mz);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id,
+ 0, align);
+ }
+
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size,
- socket_id, 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}
/**
@@ -194,7 +201,7 @@ vmxnet3_dev_atomic_write_link_status(struct rte_eth_dev *dev,
struct rte_eth_link *src = link;
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
+ *(uint64_t *)src) == 0)
return -1;
return 0;
@@ -212,7 +219,7 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
for (i = 0; i < VMXNET3_MAX_INTRS; i++)
- VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
/*
@@ -274,8 +281,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* Getting MAC Address */
mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
- memcpy(hw->perm_addr , &mac_lo, 4);
- memcpy(hw->perm_addr+4, &mac_hi, 2);
+ memcpy(hw->perm_addr, &mac_lo, 4);
+ memcpy(hw->perm_addr + 4, &mac_hi, 2);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
@@ -328,29 +335,16 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
static struct eth_driver rte_vmxnet3_pmd = {
.pci_drv = {
- .name = "rte_vmxnet3_pmd",
.id_table = pci_id_vmxnet3_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_vmxnet3_dev_init,
.eth_dev_uninit = eth_vmxnet3_dev_uninit,
.dev_private_size = sizeof(struct vmxnet3_hw),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of Virtual PCI VMXNET3 devices.
- */
-static int
-rte_vmxnet3_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_vmxnet3_pmd);
- return 0;
-}
-
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
@@ -360,9 +354,16 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
- if (dev->data->nb_rx_queues > UINT8_MAX ||
- dev->data->nb_tx_queues > UINT8_MAX)
+ if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
+ dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
+ PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
return -EINVAL;
+ }
+
+ if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
+ return -EINVAL;
+ }
size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
dev->data->nb_tx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
@@ -378,7 +379,7 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
* on current socket
*/
mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
- "shared", rte_socket_id(), 8);
+ "shared", rte_socket_id(), 8, 1);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
@@ -391,10 +392,14 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
/*
* Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
- * on current socket
+ * on current socket.
+ *
+ * We cannot reuse this memzone from previous allocation as its size
+ * depends on the number of tx and rx queues, which could be different
+ * from one config to another.
*/
- mz = gpa_zone_reserve(dev, size, "queuedesc",
- rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
+ mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
+ VMXNET3_QUEUE_DESC_ALIGN, 0);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
@@ -408,10 +413,10 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
hw->queue_desc_len = (uint16_t)size;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
-
/* Allocate memory structure for UPT1_RSSConf and configure */
- mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
- rte_socket_id(), RTE_CACHE_LINE_SIZE);
+ mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
+ "rss_conf", rte_socket_id(),
+ RTE_CACHE_LINE_SIZE, 1);
if (mz == NULL) {
PMD_INIT_LOG(ERR,
"ERROR: Creating rss_conf structure zone");
@@ -459,8 +464,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
/* Setting up Guest OS information */
devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
- VMXNET3_GOS_BITS_32 :
- VMXNET3_GOS_BITS_64;
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
devRead->misc.driverInfo.vmxnet3RevSpt = 1;
devRead->misc.driverInfo.uptVerSpt = 1;
@@ -523,6 +527,11 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
+ if (dev->data->dev_conf.rxmode.enable_lro) {
+ devRead->misc.uptFeatures |= VMXNET3_F_LRO;
+ devRead->misc.maxNumRxSG = 0;
+ }
+
if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
ret = vmxnet3_rss_configure(dev);
if (ret != VMXNET3_SUCCESS)
@@ -535,7 +544,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
}
vmxnet3_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
vmxnet3_write_mac(hw, hw->perm_addr);
@@ -550,7 +559,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
- int status, ret;
+ int ret;
struct vmxnet3_hw *hw = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
@@ -567,11 +576,11 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
/* Activate device by register write */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
- status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
- if (status != 0) {
+ if (ret != 0) {
PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
- return -1;
+ return -EINVAL;
}
/* Disable interrupts */
@@ -583,7 +592,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
*/
ret = vmxnet3_dev_rxtx_init(dev);
if (ret != VMXNET3_SUCCESS) {
- PMD_INIT_LOG(ERR, "Device receive init: UNSUCCESSFUL");
+ PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
return ret;
}
@@ -598,7 +607,7 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Reading events: 0x%X", events);
vmxnet3_process_events(hw);
#endif
- return status;
+ return VMXNET3_SUCCESS;
}
/*
@@ -664,16 +673,15 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
stats->q_opackets[i] = txStats->ucastPktsTxOK +
- txStats->mcastPktsTxOK +
- txStats->bcastPktsTxOK;
+ txStats->mcastPktsTxOK +
+ txStats->bcastPktsTxOK;
stats->q_obytes[i] = txStats->ucastBytesTxOK +
- txStats->mcastBytesTxOK +
- txStats->bcastBytesTxOK;
+ txStats->mcastBytesTxOK +
+ txStats->bcastBytesTxOK;
stats->opackets += stats->q_opackets[i];
stats->obytes += stats->q_obytes[i];
- stats->oerrors += txStats->pktsTxError +
- txStats->pktsTxDiscard;
+ stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard;
}
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
@@ -681,12 +689,12 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
- rxStats->mcastPktsRxOK +
- rxStats->bcastPktsRxOK;
+ rxStats->mcastPktsRxOK +
+ rxStats->bcastPktsRxOK;
stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
- rxStats->mcastBytesRxOK +
- rxStats->bcastBytesRxOK;
+ rxStats->mcastBytesRxOK +
+ rxStats->bcastBytesRxOK;
stats->ipackets += stats->q_ipackets[i];
stats->ibytes += stats->q_ibytes[i];
@@ -698,7 +706,7 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static void
-vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
@@ -725,7 +733,8 @@ vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
@@ -758,14 +767,16 @@ vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
/* return 0 means link status changed, -1 means not changed */
static int
-vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
+vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
struct rte_eth_link old, link;
uint32_t ret;
+ /* Link status doesn't change for stopped dev */
if (dev->data->dev_started == 0)
- return -1; /* Link status doesn't change for stopped dev */
+ return -1;
memset(&link, 0, sizeof(link));
vmxnet3_dev_atomic_read_link_status(dev, &old);
@@ -787,8 +798,8 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wai
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
-vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
-
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
if (set)
@@ -921,11 +932,13 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
/* Check if link state has changed */
if (events & VMXNET3_ECR_LINK)
PMD_INIT_LOG(ERR,
- "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
+ "Process events in %s(): VMXNET3_ECR_LINK event",
+ __func__);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_QUEUE_STATUS);
if (hw->tqd_start->status.stopped)
PMD_INIT_LOG(ERR, "tq error 0x%x",
@@ -944,14 +957,8 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
if (events & VMXNET3_ECR_DEBUG)
PMD_INIT_LOG(ERR, "Debug event generated by device.");
-
}
#endif
-static struct rte_driver rte_vmxnet3_driver = {
- .type = PMD_PDEV,
- .init = rte_vmxnet3_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_vmxnet3_driver, vmxnet3);
-DRIVER_REGISTER_PCI_TABLE(vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
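The reuse flag added to gpa_zone_reserve() above separates fixed-size zones, which may be looked up and reused across reconfigurations, from the queue-descriptor zone, whose size follows the configured queue counts and therefore has to be freed and re-reserved. Condensed from the two call sites in the hunk above (illustrative only):

    /* fixed size: reuse an existing memzone if one is found */
    mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
                          "shared", rte_socket_id(), 8, 1);

    /* size depends on nb_rx/nb_tx queues: free any stale zone and reserve anew */
    mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
                          VMXNET3_QUEUE_DESC_ALIGN, 0);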
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 1be833ab..7d3b11ee 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -62,8 +62,7 @@
ETH_RSS_NONFRAG_IPV6_TCP)
/* RSS configuration structure - shared with device through GPA */
-typedef
-struct VMXNET3_RSSConf {
+typedef struct VMXNET3_RSSConf {
uint16_t hashType;
uint16_t hashFunc;
uint16_t hashKeySize;
@@ -76,15 +75,13 @@ struct VMXNET3_RSSConf {
uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
} VMXNET3_RSSConf;
-typedef
-struct vmxnet3_mf_table {
+typedef struct vmxnet3_mf_table {
void *mfTableBase; /* Multicast addresses list */
uint64_t mfTablePA; /* Physical address of the list */
uint16_t num_addrs; /* number of multicast addrs */
} vmxnet3_mf_table_t;
struct vmxnet3_hw {
-
uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */
uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */
/* BAR2: MSI-X Regs */
@@ -111,10 +108,10 @@ struct vmxnet3_hw {
uint64_t queueDescPA;
uint16_t queue_desc_len;
- VMXNET3_RSSConf *rss_conf;
- uint64_t rss_confPA;
- vmxnet3_mf_table_t *mf_table;
- uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ VMXNET3_RSSConf *rss_conf;
+ uint64_t rss_confPA;
+ vmxnet3_mf_table_t *mf_table;
+ uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
@@ -125,7 +122,8 @@ struct vmxnet3_hw {
#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
-static inline uint32_t vmxnet3_read_addr(volatile void *addr)
+static inline uint32_t
+vmxnet3_read_addr(volatile void *addr)
{
return VMXNET3_PCI_REG(addr);
}
@@ -158,20 +156,20 @@ void vmxnet3_dev_rx_queue_release(void *rxq);
void vmxnet3_dev_tx_queue_release(void *txq);
int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
- uint16_t nb_rx_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool);
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
- uint16_t nb_tx_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf);
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
int vmxnet3_rss_configure(struct rte_eth_dev *dev);
uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
#endif /* _VMXNET3_ETHDEV_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index 69ff2ded..b50d2b00 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -96,12 +96,12 @@ vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
}
typedef struct vmxnet3_comp_ring {
- uint32_t size;
- uint32_t next2proc;
- uint8_t gen;
- uint8_t intr_idx;
+ uint32_t size;
+ uint32_t next2proc;
+ uint8_t gen;
+ uint8_t intr_idx;
Vmxnet3_GenericDesc *base;
- uint64_t basePA;
+ uint64_t basePA;
} vmxnet3_comp_ring_t;
struct vmxnet3_data_ring {
@@ -121,13 +121,13 @@ vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
}
struct vmxnet3_txq_stats {
- uint64_t drop_total; /* # of pkts dropped by the driver,
+ uint64_t drop_total; /* # of pkts dropped by the driver,
* the counters below track droppings due to
* different reasons
*/
- uint64_t drop_too_many_segs;
- uint64_t drop_tso;
- uint64_t tx_ring_full;
+ uint64_t drop_too_many_segs;
+ uint64_t drop_tso;
+ uint64_t tx_ring_full;
};
typedef struct vmxnet3_tx_queue {
@@ -158,8 +158,8 @@ typedef struct vmxnet3_rx_queue {
uint32_t qid1;
uint32_t qid2;
Vmxnet3_RxQueueDesc *shared;
- struct rte_mbuf *start_seg;
- struct rte_mbuf *last_seg;
+ struct rte_mbuf *start_seg;
+ struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 88df576c..b1091688 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -57,7 +57,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -183,7 +182,6 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
ring->buf_info = NULL;
}
-
void
vmxnet3_dev_tx_queue_release(void *txq)
{
@@ -416,7 +414,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
continue;
}
- if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ if (txm->nb_segs == 1 &&
+ rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
struct Vmxnet3_TxDataDesc *tdd;
tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
@@ -438,8 +437,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
if (copy_size)
gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
+ txq->cmd_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc));
else
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
@@ -522,13 +521,12 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Allocates mbufs and clusters. Post rx descriptors with buffer details
* so that device can receive packets in those buffers.
- * Ring layout:
- * Among the two rings, 1st ring contains buffers of type 0 and type1.
+ * Ring layout:
+ * Among the two rings, 1st ring contains buffers of type 0 and type 1.
* bufs_per_pkt is set such that for non-LRO cases all the buffers required
* by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
 * 2nd ring contains buffers of type 1 alone. The second ring is mostly used
* only for LRO.
- *
*/
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
@@ -573,8 +571,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len -
RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA =
- rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
@@ -700,7 +697,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
-
/* Initialize newly received packet buffer */
rxm->port = rxq->port_id;
rxm->nb_segs = 1;
@@ -760,7 +756,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rcd_done:
rxq->cmd_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+ VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
+ rxq->cmd_ring[ring_idx].size);
/* It's time to allocate some new buf and renew descriptors */
vmxnet3_post_rx_bufs(rxq, ring_idx);
@@ -775,8 +772,7 @@ rcd_done:
rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
nb_rxd++;
if (nb_rxd > rxq->cmd_ring[0].size) {
- PMD_RX_LOG(ERR,
- "Used up quota of receiving packets,"
+ PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
" relinquish control.");
break;
}
@@ -798,15 +794,15 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.name, ring_name,
- dev->data->port_id, queue_id);
+ dev->driver->pci_drv.driver.name, ring_name,
+ dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
if (mz)
return mz;
return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
+ socket_id, 0, VMXNET3_RING_BA_ALIGN);
}
int
@@ -814,7 +810,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+ __rte_unused const struct rte_eth_txconf *tx_conf)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
@@ -832,7 +828,8 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -EINVAL;
}
- txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+ txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
+ RTE_CACHE_LINE_SIZE);
if (txq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
return -ENOMEM;
@@ -915,12 +912,12 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
const struct rte_memzone *mz;
struct vmxnet3_rx_queue *rxq;
- struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
int size;
@@ -929,7 +926,8 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
PMD_INIT_FUNC_TRACE();
- rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+ rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
+ RTE_CACHE_LINE_SIZE);
if (rxq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
return -ENOMEM;
@@ -1003,7 +1001,9 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
ring->rid = i;
snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
- ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ ring->buf_info = rte_zmalloc(mem_name,
+ ring->size * sizeof(vmxnet3_buf_info_t),
+ RTE_CACHE_LINE_SIZE);
if (ring->buf_info == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
return -ENOMEM;
@@ -1037,10 +1037,15 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
/* Passing 0 as alloc_num will allocate full ring */
ret = vmxnet3_post_rx_bufs(rxq, j);
if (ret <= 0) {
- PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+ PMD_INIT_LOG(ERR,
+ "ERROR: Posting Rxq: %d buffers ring: %d",
+ i, j);
return -ret;
}
- /* Updating device with the index:next2fill to fill the mbufs for coming packets */
+ /*
+ * Updating device with the index:next2fill to fill the
+ * mbufs for coming packets.
+ */
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
rxq->cmd_ring[j].next2fill);
@@ -1088,7 +1093,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
/* loading hashKeySize */
dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
- /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+ /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
if (port_rss_conf->rss_key == NULL) {
@@ -1097,7 +1102,8 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
}
/* loading hashKey */
- memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+ memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
+ dev_rss_conf->hashKeySize);
/* loading indTable */
for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
index 99f6cc81..c08a0568 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.c
@@ -56,7 +56,7 @@
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <cmdline_parse.h>
#include <cmdline_parse_etheraddr.h>
@@ -654,7 +654,7 @@ eth_dev_xenvirt_create(const char *name, const char *params,
goto err;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
+ eth_dev = rte_eth_dev_allocate(name);
if (eth_dev == NULL)
goto err;
@@ -729,7 +729,7 @@ eth_dev_xenvirt_free(const char *name, const unsigned numa_node)
/*TODO: Support multiple process model */
static int
-rte_pmd_xenvirt_devinit(const char *name, const char *params)
+rte_pmd_xenvirt_probe(const char *name, const char *params)
{
if (virtio_idx == 0) {
if (xenstore_init() != 0) {
@@ -746,7 +746,7 @@ rte_pmd_xenvirt_devinit(const char *name, const char *params)
}
static int
-rte_pmd_xenvirt_devuninit(const char *name)
+rte_pmd_xenvirt_remove(const char *name)
{
eth_dev_xenvirt_free(name, rte_socket_id());
@@ -759,12 +759,12 @@ rte_pmd_xenvirt_devuninit(const char *name)
return 0;
}
-static struct rte_driver pmd_xenvirt_drv = {
- .type = PMD_VDEV,
- .init = rte_pmd_xenvirt_devinit,
- .uninit = rte_pmd_xenvirt_devuninit,
+static struct rte_vdev_driver pmd_xenvirt_drv = {
+ .probe = rte_pmd_xenvirt_probe,
+ .remove = rte_pmd_xenvirt_remove,
};
-PMD_REGISTER_DRIVER(pmd_xenvirt_drv, eth_xenvirt);
-DRIVER_REGISTER_PARAM_STRING(eth_xenvirt,
+RTE_PMD_REGISTER_VDEV(net_xenvirt, pmd_xenvirt_drv);
+RTE_PMD_REGISTER_ALIAS(net_xenvirt, eth_xenvirt);
+RTE_PMD_REGISTER_PARAM_STRING(net_xenvirt,
"mac=<mac addr>");
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.h b/drivers/net/xenvirt/rte_eth_xenvirt.h
index 4995a9b4..598adc6f 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.h
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.h
@@ -39,7 +39,6 @@ extern "C" {
#endif
#include <rte_mempool.h>
-#include <rte_ring.h>
/**
* Creates mempool for xen virtio PMD.