author     Christian Ehrhardt <christian.ehrhardt@canonical.com>   2019-02-26 09:17:37 +0100
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>   2019-02-26 09:21:27 +0100
commit     597cb1874068054d4c0be41f161a72ef37888930 (patch)
tree       8899c19634bd8e393a8eac05f33925e4d75bd77d
parent     6e7cbd63706f3435b9d9a2057a37db1da01db9a7 (diff)

New upstream version 17.11.5 (upstream-17.11-stable)

Change-Id: I8d2aa1aee2a9a78614dff5a01008f91e88e810c7
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
-rw-r--r--  app/pdump/main.c | 10
-rw-r--r--  app/proc_info/main.c | 8
-rw-r--r--  app/test-crypto-perf/cperf_test_vectors.c | 22
-rw-r--r--  app/test-crypto-perf/main.c | 8
-rw-r--r--  app/test-eventdev/test_order_atq.c | 2
-rw-r--r--  app/test-eventdev/test_order_queue.c | 2
-rw-r--r--  app/test-eventdev/test_perf_atq.c | 2
-rw-r--r--  app/test-eventdev/test_perf_queue.c | 2
-rw-r--r--  app/test-pmd/cmdline.c | 8
-rw-r--r--  app/test-pmd/cmdline_mtr.c | 8
-rw-r--r--  app/test-pmd/cmdline_tm.c | 34
-rw-r--r--  app/test-pmd/csumonly.c | 8
-rw-r--r--  app/test-pmd/parameters.c | 14
-rw-r--r--  app/test-pmd/rxonly.c | 2
-rw-r--r--  app/test-pmd/testpmd.c | 51
-rw-r--r--  app/test-pmd/tm.c | 2
-rw-r--r--  config/common_base | 4
-rwxr-xr-x  devtools/check-git-log.sh | 4
-rw-r--r--  devtools/cocci/strlcpy.cocci | 8
-rw-r--r--  doc/guides/contributing/documentation.rst | 4
-rw-r--r--  doc/guides/contributing/patches.rst | 10
-rw-r--r--  doc/guides/contributing/stable.rst | 6
-rw-r--r--  doc/guides/cryptodevs/features/qat.ini | 3
-rw-r--r--  doc/guides/cryptodevs/qat.rst | 1
-rw-r--r--  doc/guides/freebsd_gsg/install_from_ports.rst | 2
-rw-r--r--  doc/guides/howto/flow_bifurcation.rst | 2
-rw-r--r--  doc/guides/howto/rte_flow.rst | 32
-rw-r--r--  doc/guides/linux_gsg/nic_perf_intel_platform.rst | 2
-rw-r--r--  doc/guides/linux_gsg/sys_reqs.rst | 6
-rw-r--r--  doc/guides/nics/ena.rst | 15
-rw-r--r--  doc/guides/nics/enic.rst | 2
-rw-r--r--  doc/guides/nics/features.rst | 4
-rw-r--r--  doc/guides/nics/features/ena.ini | 1
-rw-r--r--  doc/guides/nics/features/failsafe.ini | 1
-rw-r--r--  doc/guides/nics/mlx5.rst | 81
-rw-r--r--  doc/guides/nics/virtio.rst | 2
-rw-r--r--  doc/guides/rel_notes/release_17_11.rst | 224
-rw-r--r--  doc/guides/sample_app_ug/compiling.rst | 15
-rw-r--r--  doc/guides/sample_app_ug/ip_reassembly.rst | 4
-rw-r--r--  doc/guides/sample_app_ug/ipv4_multicast.rst | 1
-rw-r--r--  doc/guides/sample_app_ug/vhost.rst | 2
-rw-r--r--  doc/guides/testpmd_app_ug/run_app.rst | 2
-rw-r--r--  doc/guides/testpmd_app_ug/testpmd_funcs.rst | 9
-rw-r--r--  drivers/bus/dpaa/base/fman/fman.c | 3
-rw-r--r--  drivers/bus/dpaa/base/qbman/qman.c | 14
-rw-r--r--  drivers/bus/dpaa/include/fsl_qman.h | 24
-rw-r--r--  drivers/bus/pci/linux/pci.c | 41
-rw-r--r--  drivers/bus/pci/linux/pci_uio.c | 2
-rw-r--r--  drivers/bus/pci/linux/pci_vfio.c | 12
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 20
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd_ops.c | 46
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 3
-rw-r--r--  drivers/event/sw/sw_evdev_scheduler.c | 13
-rw-r--r--  drivers/net/bnx2x/bnx2x.c | 842
-rw-r--r--  drivers/net/bnx2x/bnx2x.h | 39
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c | 98
-rw-r--r--  drivers/net/bnx2x/bnx2x_logs.h | 27
-rw-r--r--  drivers/net/bnx2x/bnx2x_rxtx.c | 17
-rw-r--r--  drivers/net/bnx2x/bnx2x_stats.c | 35
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.c | 50
-rw-r--r--  drivers/net/bnx2x/ecore_init.h | 6
-rw-r--r--  drivers/net/bnx2x/ecore_init_ops.h | 8
-rw-r--r--  drivers/net/bnx2x/ecore_reg.h | 1
-rw-r--r--  drivers/net/bnx2x/ecore_sp.c | 258
-rw-r--r--  drivers/net/bnx2x/ecore_sp.h | 4
-rw-r--r--  drivers/net/bnx2x/elink.c | 761
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c | 12
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 4
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c | 5
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 114
-rw-r--r--  drivers/net/e1000/base/e1000_i210.c | 1
-rw-r--r--  drivers/net/e1000/em_rxtx.c | 7
-rw-r--r--  drivers/net/e1000/igb_rxtx.c | 4
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 2
-rw-r--r--  drivers/net/enic/base/vnic_devcmd.h | 16
-rw-r--r--  drivers/net/enic/enic_flow.c | 2
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 6
-rw-r--r--  drivers/net/failsafe/failsafe_ops.c | 30
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 11
-rw-r--r--  drivers/net/i40e/base/i40e_adminq.c | 7
-rw-r--r--  drivers/net/i40e/base/i40e_adminq_cmd.h | 3
-rw-r--r--  drivers/net/i40e/base/i40e_common.c | 2
-rw-r--r--  drivers/net/i40e/base/i40e_lan_hmc.c | 15
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 89
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 6
-rw-r--r--  drivers/net/i40e/rte_pmd_i40e.c | 2
-rw-r--r--  drivers/net/ixgbe/base/ixgbe_common.c | 4
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 79
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c | 4
-rw-r--r--  drivers/net/mlx4/mlx4_ethdev.c | 2
-rw-r--r--  drivers/net/mlx5/mlx5.c | 39
-rw-r--r--  drivers/net/mlx5/mlx5.h | 20
-rw-r--r--  drivers/net/mlx5/mlx5_defs.h | 15
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 5
-rw-r--r--  drivers/net/mlx5/mlx5_mr.c | 583
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 38
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c | 3
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h | 188
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.c | 2
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec.h | 6
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 2
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 2
-rw-r--r--  drivers/net/mlx5/mlx5_stats.c | 3
-rw-r--r--  drivers/net/mlx5/mlx5_trigger.c | 14
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 23
-rw-r--r--  drivers/net/mrvl/mrvl_ethdev.c | 3
-rw-r--r--  drivers/net/nfp/nfp_net.c | 53
-rw-r--r--  drivers/net/nfp/nfp_net_pmd.h | 2
-rw-r--r--  drivers/net/nfp/nfp_nspu.c | 5
-rw-r--r--  drivers/net/octeontx/base/octeontx_pki_var.h | 13
-rw-r--r--  drivers/net/octeontx/octeontx_ethdev.c | 23
-rw-r--r--  drivers/net/octeontx/octeontx_rxtx.c | 1
-rw-r--r--  drivers/net/qede/base/bcm_osal.h | 1
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 27
-rw-r--r--  drivers/net/qede/base/ecore_int.c | 40
-rw-r--r--  drivers/net/qede/base/ecore_int.h | 1
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c | 46
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h | 4
-rw-r--r--  drivers/net/qede/base/mcp_public.h | 3
-rw-r--r--  drivers/net/qede/base/reg_addr.h | 20
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 28
-rw-r--r--  drivers/net/qede/qede_fdir.c | 12
-rw-r--r--  drivers/net/qede/qede_main.c | 6
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 88
-rw-r--r--  drivers/net/qede/qede_rxtx.h | 22
-rw-r--r--  drivers/net/sfc/base/ef10_ev.c | 28
-rw-r--r--  drivers/net/sfc/base/ef10_filter.c | 64
-rw-r--r--  drivers/net/sfc/base/ef10_impl.h | 2
-rw-r--r--  drivers/net/sfc/base/ef10_intr.c | 5
-rw-r--r--  drivers/net/sfc/base/ef10_mac.c | 24
-rw-r--r--  drivers/net/sfc/base/ef10_nic.c | 79
-rw-r--r--  drivers/net/sfc/base/ef10_nvram.c | 6
-rw-r--r--  drivers/net/sfc/base/ef10_phy.c | 35
-rw-r--r--  drivers/net/sfc/base/ef10_rx.c | 35
-rw-r--r--  drivers/net/sfc/base/ef10_tx.c | 18
-rw-r--r--  drivers/net/sfc/base/efx_impl.h | 3
-rw-r--r--  drivers/net/sfc/base/efx_lic.c | 74
-rw-r--r--  drivers/net/sfc/base/efx_mac.c | 9
-rw-r--r--  drivers/net/sfc/base/efx_mcdi.c | 83
-rw-r--r--  drivers/net/sfc/base/efx_mcdi.h | 11
-rw-r--r--  drivers/net/sfc/base/efx_nic.c | 8
-rw-r--r--  drivers/net/sfc/base/efx_nvram.c | 69
-rw-r--r--  drivers/net/sfc/base/efx_phy.c | 2
-rw-r--r--  drivers/net/sfc/base/efx_port.c | 3
-rw-r--r--  drivers/net/sfc/base/mcdi_mon.c | 24
-rw-r--r--  drivers/net/sfc/base/medford_nic.c | 5
-rw-r--r--  drivers/net/sfc/base/siena_mac.c | 9
-rw-r--r--  drivers/net/sfc/base/siena_nic.c | 5
-rw-r--r--  drivers/net/sfc/base/siena_nvram.c | 5
-rw-r--r--  drivers/net/sfc/base/siena_phy.c | 28
-rw-r--r--  drivers/net/sfc/sfc_ef10_rx.c | 16
-rw-r--r--  drivers/net/sfc/sfc_ethdev.c | 17
-rw-r--r--  drivers/net/sfc/sfc_rx.c | 5
-rw-r--r--  drivers/net/sfc/sfc_tx.c | 2
-rw-r--r--  drivers/net/softnic/rte_eth_softnic.c | 21
-rw-r--r--  drivers/net/tap/rte_eth_tap.c | 2
-rw-r--r--  drivers/net/thunderx/nicvf_rxtx.c | 10
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c | 5
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 26
-rw-r--r--  drivers/net/virtio/virtio_pci.c | 75
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel.c | 19
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.c | 68
-rw-r--r--  drivers/net/virtio/virtio_user/vhost_kernel_tap.h | 3
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c | 18
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 2
-rw-r--r--  examples/flow_filtering/flow_blocks.c | 21
-rw-r--r--  examples/ipv4_multicast/main.c | 2
-rw-r--r--  examples/vhost/main.c | 3
-rw-r--r--  lib/librte_acl/rte_acl.h | 2
-rw-r--r--  lib/librte_eal/common/arch/x86/rte_memcpy.c | 58
-rw-r--r--  lib/librte_eal/common/eal_common_options.c | 2
-rw-r--r--  lib/librte_eal/common/include/arch/arm/rte_vect.h | 6
-rw-r--r--  lib/librte_eal/common/include/arch/x86/rte_memcpy.h | 2
-rw-r--r--  lib/librte_eal/common/include/arch/x86/rte_spinlock.h | 4
-rw-r--r--  lib/librte_eal/common/include/generic/rte_byteorder.h | 6
-rw-r--r--  lib/librte_eal/common/include/generic/rte_rwlock.h | 4
-rw-r--r--  lib/librte_eal/common/include/rte_bitmap.h | 14
-rw-r--r--  lib/librte_eal/common/include/rte_common.h | 30
-rw-r--r--  lib/librte_eal/common/include/rte_dev.h | 15
-rw-r--r--  lib/librte_eal/common/include/rte_lcore.h | 2
-rw-r--r--  lib/librte_eal/common/include/rte_random.h | 6
-rw-r--r--  lib/librte_eal/common/include/rte_string_fns.h | 31
-rw-r--r--  lib/librte_eal/common/include/rte_version.h | 2
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal.c | 8
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_interrupts.c | 19
-rw-r--r--  lib/librte_eal/linuxapp/eal/eal_memory.c | 21
-rw-r--r--  lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h | 1
-rw-r--r--  lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 33
-rw-r--r--  lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c | 8
-rw-r--r--  lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h | 2
-rw-r--r--  lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c | 8
-rw-r--r--  lib/librte_eal/linuxapp/kni/kni_ethtool.c | 10
-rw-r--r--  lib/librte_eal/linuxapp/kni/kni_fifo.h | 24
-rw-r--r--  lib/librte_efd/rte_efd.c | 3
-rw-r--r--  lib/librte_ether/rte_ethdev.c | 26
-rw-r--r--  lib/librte_ether/rte_ethdev.h | 29
-rw-r--r--  lib/librte_eventdev/rte_event_eth_rx_adapter.c | 6
-rw-r--r--  lib/librte_gro/gro_tcp4.c | 7
-rw-r--r--  lib/librte_hash/rte_hash_crc.h | 11
-rw-r--r--  lib/librte_ip_frag/ip_frag_common.h | 19
-rw-r--r--  lib/librte_ip_frag/rte_ip_frag.h | 14
-rw-r--r--  lib/librte_kni/rte_kni.c | 2
-rw-r--r--  lib/librte_kvargs/rte_kvargs.c | 3
-rw-r--r--  lib/librte_kvargs/rte_kvargs.h | 7
-rw-r--r--  lib/librte_latencystats/rte_latencystats.c | 7
-rw-r--r--  lib/librte_mbuf/rte_mbuf.h | 50
-rw-r--r--  lib/librte_net/rte_ether.h | 5
-rw-r--r--  lib/librte_net/rte_gre.h | 1
-rw-r--r--  lib/librte_net/rte_ip.h | 14
-rw-r--r--  lib/librte_net/rte_net.h | 20
-rw-r--r--  lib/librte_pci/rte_pci.c | 4
-rw-r--r--  lib/librte_ring/rte_ring.h | 14
-rw-r--r--  lib/librte_sched/rte_reciprocal.c | 17
-rw-r--r--  lib/librte_table/rte_table_hash_cuckoo.c | 2
-rw-r--r--  lib/librte_vhost/vhost_user.c | 2
-rw-r--r--  lib/librte_vhost/virtio_net.c | 6
-rw-r--r--  mk/rte.app.mk | 3
-rw-r--r--  mk/rte.cpuflags.mk | 7
-rw-r--r--  pkg/dpdk.spec | 2
-rw-r--r--  test/test/test_acl.h | 18
-rw-r--r--  test/test/test_bitmap.c | 18
-rw-r--r--  test/test/test_common.c | 32
-rw-r--r--  test/test/test_cryptodev.c | 6
-rw-r--r--  test/test/test_eventdev.c | 21
-rw-r--r--  test/test/test_hash.c | 24
-rw-r--r--  test/test/test_hash_perf.c | 3
-rw-r--r--  test/test/test_pmd_ring_perf.c | 8
-rw-r--r--  test/test/test_reorder.c | 2
-rwxr-xr-x  usertools/dpdk-devbind.py | 7
229 files changed, 3813 insertions, 2596 deletions
diff --git a/app/pdump/main.c b/app/pdump/main.c
index 66272f59..8e42b364 100644
--- a/app/pdump/main.c
+++ b/app/pdump/main.c
@@ -148,8 +148,8 @@ struct pdump_tuples {
/* params for packet dumping */
enum pdump_by dump_by_type;
- int rx_vdev_id;
- int tx_vdev_id;
+ uint16_t rx_vdev_id;
+ uint16_t tx_vdev_id;
enum pcap_stream rx_vdev_stream_type;
enum pcap_stream tx_vdev_stream_type;
bool single_pdump_dev;
@@ -301,7 +301,7 @@ parse_pdump(const char *optarg)
&parse_uint_value, &v);
if (ret < 0)
goto free_kvlist;
- pt->port = (uint8_t) v.val;
+ pt->port = (uint16_t) v.val;
pt->dump_by_type = PORT_ID;
} else if (cnt2 == 1) {
ret = rte_kvargs_process(kvlist, PDUMP_PCI_ARG,
@@ -489,7 +489,7 @@ disable_pdump(struct pdump_tuples *pt)
}
static inline void
-pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
+pdump_rxtx(struct rte_ring *ring, uint16_t vdev_id, struct pdump_stats *stats)
{
/* write input packets of port to vdev for pdump */
struct rte_mbuf *rxtx_bufs[BURST_SIZE];
@@ -516,7 +516,7 @@ pdump_rxtx(struct rte_ring *ring, uint8_t vdev_id, struct pdump_stats *stats)
}
static void
-free_ring_data(struct rte_ring *ring, uint8_t vdev_id,
+free_ring_data(struct rte_ring *ring, uint16_t vdev_id,
struct pdump_stats *stats)
{
while (rte_ring_count(ring))
diff --git a/app/proc_info/main.c b/app/proc_info/main.c
index 875d91ea..2893bec6 100644
--- a/app/proc_info/main.c
+++ b/app/proc_info/main.c
@@ -517,14 +517,18 @@ nic_xstats_display(uint16_t port_id)
if (enable_collectd_format) {
char counter_type[MAX_STRING_LEN];
char buf[MAX_STRING_LEN];
+ size_t n;
collectd_resolve_cnt_type(counter_type,
sizeof(counter_type),
xstats_names[i].name);
- sprintf(buf, "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%"
+ n = snprintf(buf, MAX_STRING_LEN,
+ "PUTVAL %s/dpdkstat-port.%u/%s-%s N:%"
PRIu64"\n", host_id, port_id, counter_type,
xstats_names[i].name, values[i]);
- ret = write(stdout_fd, buf, strlen(buf));
+ if (n > sizeof(buf) - 1)
+ n = sizeof(buf) - 1;
+ ret = write(stdout_fd, buf, n);
if (ret < 0)
goto err;
} else {
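The hunk above replaces an unbounded sprintf() with snprintf() and clamps the returned length before handing it to write(). A minimal standalone sketch of that clamp pattern follows; the buffer size, field names and the helper itself are illustrative, not part of the patch:

#include <stdio.h>
#include <unistd.h>

static ssize_t
write_stat_line(int fd, const char *name, unsigned long value)
{
	char buf[128];
	size_t n;

	/* snprintf() returns the length the full string would have had,
	 * which can exceed the buffer; clamp it before write(). */
	n = snprintf(buf, sizeof(buf), "%s N:%lu\n", name, value);
	if (n > sizeof(buf) - 1)
		n = sizeof(buf) - 1;
	return write(fd, buf, n);
}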
diff --git a/app/test-crypto-perf/cperf_test_vectors.c b/app/test-crypto-perf/cperf_test_vectors.c
index fa911ff6..c048652d 100644
--- a/app/test-crypto-perf/cperf_test_vectors.c
+++ b/app/test-crypto-perf/cperf_test_vectors.c
@@ -447,13 +447,19 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
t_vec->cipher_key.length = 0;
t_vec->ciphertext.data = plaintext;
t_vec->cipher_key.data = NULL;
- t_vec->cipher_iv.data = NULL;
} else {
t_vec->cipher_key.length = options->cipher_key_sz;
t_vec->ciphertext.data = ciphertext;
t_vec->cipher_key.data = cipher_key;
- t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
- 16);
+ }
+
+ /* Init IV data ptr */
+ t_vec->cipher_iv.data = NULL;
+
+ if (options->cipher_iv_sz != 0) {
+ /* Set IV parameters */
+ t_vec->cipher_iv.data = rte_malloc(NULL,
+ options->cipher_iv_sz, 16);
if (t_vec->cipher_iv.data == NULL) {
rte_free(t_vec);
return NULL;
@@ -461,17 +467,7 @@ cperf_test_vector_get_dummy(struct cperf_options *options)
memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
}
t_vec->ciphertext.length = options->max_buffer_size;
-
- /* Set IV parameters */
- t_vec->cipher_iv.data = rte_malloc(NULL, options->cipher_iv_sz,
- 16);
- if (options->cipher_iv_sz && t_vec->cipher_iv.data == NULL) {
- rte_free(t_vec);
- return NULL;
- }
- memcpy(t_vec->cipher_iv.data, iv, options->cipher_iv_sz);
t_vec->cipher_iv.length = options->cipher_iv_sz;
-
t_vec->data.cipher_offset = 0;
t_vec->data.cipher_length = options->max_buffer_size;
diff --git a/app/test-crypto-perf/main.c b/app/test-crypto-perf/main.c
index 13e01218..be861cc0 100644
--- a/app/test-crypto-perf/main.c
+++ b/app/test-crypto-perf/main.c
@@ -324,7 +324,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->ciphertext.length < opts->max_buffer_size)
return -1;
- if (test_vec->cipher_iv.data == NULL)
+ /* Cipher IV is only required for some algorithms */
+ if (opts->cipher_iv_sz &&
+ test_vec->cipher_iv.data == NULL)
return -1;
if (test_vec->cipher_iv.length != opts->cipher_iv_sz)
return -1;
@@ -339,7 +341,9 @@ cperf_check_test_vector(struct cperf_options *opts,
return -1;
if (test_vec->plaintext.length < opts->max_buffer_size)
return -1;
- if (test_vec->auth_key.data == NULL)
+ /* Auth key is only required for some algorithms */
+ if (opts->auth_key_sz &&
+ test_vec->auth_key.data == NULL)
return -1;
if (test_vec->auth_key.length != opts->auth_key_sz)
return -1;
diff --git a/app/test-eventdev/test_order_atq.c b/app/test-eventdev/test_order_atq.c
index 4ee0dea8..01ee9ea3 100644
--- a/app/test-eventdev/test_order_atq.c
+++ b/app/test-eventdev/test_order_atq.c
@@ -35,7 +35,7 @@
#include "test_order_common.h"
-/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
static inline __attribute__((always_inline)) void
order_atq_process_stage_0(struct rte_event *const ev)
diff --git a/app/test-eventdev/test_order_queue.c b/app/test-eventdev/test_order_queue.c
index eef69a4c..84edeab5 100644
--- a/app/test-eventdev/test_order_queue.c
+++ b/app/test-eventdev/test_order_queue.c
@@ -35,7 +35,7 @@
#include "test_order_common.h"
-/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
static inline __attribute__((always_inline)) void
order_queue_process_stage_0(struct rte_event *const ev)
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 0e9f2db0..1088c11a 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -32,7 +32,7 @@
#include "test_perf_common.h"
-/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
static inline int
atq_nb_event_queues(struct evt_options *opt)
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index d843eea1..cddf56c3 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -32,7 +32,7 @@
#include "test_perf_common.h"
-/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
static inline int
perf_queue_nb_event_queues(struct evt_options *opt)
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 77c11b84..a4bd5825 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -729,7 +729,8 @@ static void cmd_help_long_parsed(void *parsed_result,
" show all queue region related configuration info\n\n"
"add port tm node shaper profile (port_id) (shaper_profile_id)"
- " (tb_rate) (tb_size) (packet_length_adjust)\n"
+ " (cmit_tb_rate) (cmit_tb_size) (peak_tb_rate) (peak_tb_size)"
+ " (packet_length_adjust)\n"
" Add port tm node private shaper profile.\n\n"
"del port tm node shaper profile (port_id) (shaper_profile_id)\n"
@@ -3796,7 +3797,7 @@ cmdline_parse_token_string_t cmd_csum_tunnel_csum =
csum, "csum");
cmdline_parse_token_string_t cmd_csum_tunnel_parse =
TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
- parse, "parse_tunnel");
+ parse, "parse-tunnel");
cmdline_parse_token_string_t cmd_csum_tunnel_onoff =
TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result,
onoff, "on#off");
@@ -3807,7 +3808,7 @@ cmdline_parse_token_num_t cmd_csum_tunnel_portid =
cmdline_parse_inst_t cmd_csum_tunnel = {
.f = cmd_csum_tunnel_parsed,
.data = NULL,
- .help_str = "csum parse_tunnel on|off <port_id>: "
+ .help_str = "csum parse-tunnel on|off <port_id>: "
"Enable/Disable parsing of tunnels for csum engine",
.tokens = {
(void *)&cmd_csum_tunnel_csum,
@@ -7052,7 +7053,6 @@ static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- pmd_test_exit();
cmdline_quit(cl);
}
diff --git a/app/test-pmd/cmdline_mtr.c b/app/test-pmd/cmdline_mtr.c
index d8d806d7..9666789b 100644
--- a/app/test-pmd/cmdline_mtr.c
+++ b/app/test-pmd/cmdline_mtr.c
@@ -192,9 +192,9 @@ cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm = {
(void *)&cmd_add_port_meter_profile_srtcm_port,
(void *)&cmd_add_port_meter_profile_srtcm_meter,
(void *)&cmd_add_port_meter_profile_srtcm_profile,
+ (void *)&cmd_add_port_meter_profile_srtcm_srtcm_rfc2697,
(void *)&cmd_add_port_meter_profile_srtcm_port_id,
(void *)&cmd_add_port_meter_profile_srtcm_profile_id,
- (void *)&cmd_add_port_meter_profile_srtcm_srtcm_rfc2697,
(void *)&cmd_add_port_meter_profile_srtcm_cir,
(void *)&cmd_add_port_meter_profile_srtcm_cbs,
(void *)&cmd_add_port_meter_profile_srtcm_ebs,
@@ -299,9 +299,9 @@ cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm = {
(void *)&cmd_add_port_meter_profile_trtcm_port,
(void *)&cmd_add_port_meter_profile_trtcm_meter,
(void *)&cmd_add_port_meter_profile_trtcm_profile,
+ (void *)&cmd_add_port_meter_profile_trtcm_trtcm_rfc2698,
(void *)&cmd_add_port_meter_profile_trtcm_port_id,
(void *)&cmd_add_port_meter_profile_trtcm_profile_id,
- (void *)&cmd_add_port_meter_profile_trtcm_trtcm_rfc2698,
(void *)&cmd_add_port_meter_profile_trtcm_cir,
(void *)&cmd_add_port_meter_profile_trtcm_pir,
(void *)&cmd_add_port_meter_profile_trtcm_cbs,
@@ -411,9 +411,9 @@ cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115 = {
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_meter,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile,
+ (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port_id,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile_id,
- (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cir,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_eir,
(void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cbs,
@@ -991,7 +991,7 @@ static void cmd_show_port_meter_stats_parsed(void *parsed_result,
printf("\tPkts R: %" PRIu64 "\n",
stats.n_pkts[RTE_MTR_RED]);
if (stats_mask & RTE_MTR_STATS_N_BYTES_RED)
- printf("\tBytes Y: %" PRIu64 "\n",
+ printf("\tBytes R: %" PRIu64 "\n",
stats.n_bytes[RTE_MTR_RED]);
if (stats_mask & RTE_MTR_STATS_N_PKTS_DROPPED)
printf("\tPkts DROPPED: %" PRIu64 "\n",
diff --git a/app/test-pmd/cmdline_tm.c b/app/test-pmd/cmdline_tm.c
index d40a427d..4dcddaff 100644
--- a/app/test-pmd/cmdline_tm.c
+++ b/app/test-pmd/cmdline_tm.c
@@ -795,8 +795,10 @@ struct cmd_add_port_tm_node_shaper_profile_result {
cmdline_fixed_string_t profile;
uint16_t port_id;
uint32_t shaper_id;
- uint64_t tb_rate;
- uint64_t tb_size;
+ uint64_t cmit_tb_rate;
+ uint64_t cmit_tb_size;
+ uint64_t peak_tb_rate;
+ uint64_t peak_tb_size;
uint32_t pktlen_adjust;
};
@@ -831,14 +833,22 @@ cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_shaper_id =
TOKEN_NUM_INITIALIZER(
struct cmd_add_port_tm_node_shaper_profile_result,
shaper_id, UINT32);
-cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_tb_rate =
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_cmit_tb_rate =
TOKEN_NUM_INITIALIZER(
struct cmd_add_port_tm_node_shaper_profile_result,
- tb_rate, UINT64);
-cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_tb_size =
+ cmit_tb_rate, UINT64);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_cmit_tb_size =
TOKEN_NUM_INITIALIZER(
struct cmd_add_port_tm_node_shaper_profile_result,
- tb_size, UINT64);
+ cmit_tb_size, UINT64);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_peak_tb_rate =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ peak_tb_rate, UINT64);
+cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_peak_tb_size =
+ TOKEN_NUM_INITIALIZER(
+ struct cmd_add_port_tm_node_shaper_profile_result,
+ peak_tb_size, UINT64);
cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_pktlen_adjust =
TOKEN_NUM_INITIALIZER(
struct cmd_add_port_tm_node_shaper_profile_result,
@@ -861,8 +871,10 @@ static void cmd_add_port_tm_node_shaper_profile_parsed(void *parsed_result,
/* Private shaper profile params */
memset(&sp, 0, sizeof(struct rte_tm_shaper_params));
- sp.peak.rate = res->tb_rate;
- sp.peak.size = res->tb_size;
+ sp.committed.rate = res->cmit_tb_rate;
+ sp.committed.size = res->cmit_tb_size;
+ sp.peak.rate = res->peak_tb_rate;
+ sp.peak.size = res->peak_tb_size;
sp.pkt_length_adjust = pkt_len_adjust;
ret = rte_tm_shaper_profile_add(port_id, shaper_id, &sp, &error);
@@ -885,8 +897,10 @@ cmdline_parse_inst_t cmd_add_port_tm_node_shaper_profile = {
(void *)&cmd_add_port_tm_node_shaper_profile_profile,
(void *)&cmd_add_port_tm_node_shaper_profile_port_id,
(void *)&cmd_add_port_tm_node_shaper_profile_shaper_id,
- (void *)&cmd_add_port_tm_node_shaper_profile_tb_rate,
- (void *)&cmd_add_port_tm_node_shaper_profile_tb_size,
+ (void *)&cmd_add_port_tm_node_shaper_profile_cmit_tb_rate,
+ (void *)&cmd_add_port_tm_node_shaper_profile_cmit_tb_size,
+ (void *)&cmd_add_port_tm_node_shaper_profile_peak_tb_rate,
+ (void *)&cmd_add_port_tm_node_shaper_profile_peak_tb_size,
(void *)&cmd_add_port_tm_node_shaper_profile_pktlen_adjust,
NULL,
},
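The shaper-profile command now takes separate committed and peak token buckets. A minimal sketch of filling and registering such a dual-rate profile through the rte_tm API, following the fields used in the hunk above; the numeric rates, sizes and the helper name are example values, not part of the patch:

#include <string.h>
#include <rte_tm.h>

static int
add_dual_rate_profile(uint16_t port_id, uint32_t shaper_id)
{
	struct rte_tm_shaper_params sp;
	struct rte_tm_error error;

	memset(&sp, 0, sizeof(sp));
	sp.committed.rate = 1000000;   /* committed rate, bytes/second */
	sp.committed.size = 4096;      /* committed bucket size, bytes */
	sp.peak.rate = 2000000;        /* peak rate, bytes/second */
	sp.peak.size = 4096;           /* peak bucket size, bytes */
	sp.pkt_length_adjust = 24;     /* per-packet framing overhead, bytes */

	return rte_tm_shaper_profile_add(port_id, shaper_id, &sp, &error);
}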
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index aa29f5fc..4800e74e 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -135,7 +135,9 @@ parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
- } else
+ } else if (info->l4_proto == IPPROTO_UDP)
+ info->l4_len = sizeof(struct udp_hdr);
+ else
info->l4_len = 0;
}
@@ -152,7 +154,9 @@ parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
if (info->l4_proto == IPPROTO_TCP) {
tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
- } else
+ } else if (info->l4_proto == IPPROTO_UDP)
+ info->l4_len = sizeof(struct udp_hdr);
+ else
info->l4_len = 0;
}
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
index 5d51808f..1dfbcc4f 100644
--- a/app/test-pmd/parameters.c
+++ b/app/test-pmd/parameters.c
@@ -431,8 +431,11 @@ parse_portnuma_config(const char *q_arg)
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
if (new_socket_id(socket_id)) {
- print_invalid_socket_id_error();
- return -1;
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ print_invalid_socket_id_error();
+ return -1;
+ }
+ socket_ids[num_sockets++] = socket_id;
}
port_numa[port_id] = socket_id;
}
@@ -488,8 +491,11 @@ parse_ringnuma_config(const char *q_arg)
}
socket_id = (uint8_t)int_fld[FLD_SOCKET];
if (new_socket_id(socket_id)) {
- print_invalid_socket_id_error();
- return -1;
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ print_invalid_socket_id_error();
+ return -1;
+ }
+ socket_ids[num_sockets++] = socket_id;
}
ring_flag = (uint8_t)int_fld[FLD_FLAG];
if ((ring_flag < RX_RING_ONLY) || (ring_flag > RXTX_RING)) {
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
index fb6e8e33..ea2a956c 100644
--- a/app/test-pmd/rxonly.c
+++ b/app/test-pmd/rxonly.c
@@ -159,7 +159,7 @@ pkt_burst_receive(struct fwd_stream *fs)
}
if (ol_flags & PKT_RX_TIMESTAMP)
printf(" - timestamp %"PRIu64" ", mb->timestamp);
- if (ol_flags & PKT_RX_VLAN_STRIPPED)
+ if (ol_flags & PKT_RX_VLAN)
printf(" - VLAN tci=0x%x", mb->vlan_tci);
if (ol_flags & PKT_RX_QINQ_STRIPPED)
printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 32d68717..0adebddc 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -446,6 +446,8 @@ set_default_fwd_lcores_config(void)
nb_lc = 0;
for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
sock_num = rte_lcore_to_socket_id(i);
if (new_socket_id(sock_num)) {
if (num_sockets >= RTE_MAX_NUMA_NODES) {
@@ -455,8 +457,6 @@ set_default_fwd_lcores_config(void)
}
socket_ids[num_sockets++] = sock_num;
}
- if (!rte_lcore_is_enabled(i))
- continue;
if (i == rte_get_master_lcore())
continue;
fwd_lcores_cpuids[nb_lc++] = i;
@@ -483,9 +483,21 @@ set_default_fwd_ports_config(void)
portid_t pt_id;
int i = 0;
- RTE_ETH_FOREACH_DEV(pt_id)
+ RTE_ETH_FOREACH_DEV(pt_id) {
fwd_ports_ids[i++] = pt_id;
+ /* Update sockets info according to the attached device */
+ int socket_id = rte_eth_dev_socket_id(pt_id);
+ if (socket_id >= 0 && new_socket_id(socket_id)) {
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ rte_exit(EXIT_FAILURE,
+ "Total sockets greater than %u\n",
+ RTE_MAX_NUMA_NODES);
+ }
+ socket_ids[num_sockets++] = socket_id;
+ }
+ }
+
nb_cfg_ports = nb_ports;
nb_fwd_ports = nb_ports;
}
@@ -674,12 +686,6 @@ init_config(void)
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
- if (numa_support) {
- memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- }
-
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
@@ -709,9 +715,12 @@ init_config(void)
else {
uint32_t socket_id = rte_eth_dev_socket_id(pid);
- /* if socket_id is invalid, set to 0 */
+ /*
+ * if socket_id is invalid,
+ * set to the first available socket.
+ */
if (check_socket_id(socket_id) < 0)
- socket_id = 0;
+ socket_id = socket_ids[0];
port_per_socket[socket_id]++;
}
}
@@ -845,9 +854,12 @@ init_fwd_streams(void)
else {
port->socket_id = rte_eth_dev_socket_id(pid);
- /* if socket_id is invalid, set to 0 */
+ /*
+ * if socket_id is invalid,
+ * set to the first available socket.
+ */
if (check_socket_id(port->socket_id) < 0)
- port->socket_id = 0;
+ port->socket_id = socket_ids[0];
}
}
else {
@@ -1896,9 +1908,9 @@ attach_port(char *identifier)
return;
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
- /* if socket_id is invalid, set to 0 */
+ /* if socket_id is invalid, set to the first available socket. */
if (check_socket_id(socket_id) < 0)
- socket_id = 0;
+ socket_id = socket_ids[0];
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
@@ -2066,11 +2078,11 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
RTE_SET_USED(ret_param);
if (type >= RTE_ETH_EVENT_MAX) {
- fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
+ fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
port_id, __func__, type);
fflush(stderr);
} else if (event_print_mask & (UINT32_C(1) << type)) {
- printf("\nPort %" PRIu8 ": %s event\n", port_id,
+ printf("\nPort %" PRIu16 ": %s event\n", port_id,
event_desc[type]);
fflush(stdout);
}
@@ -2464,6 +2476,11 @@ init_port(void)
"rte_zmalloc(%d struct rte_port) failed\n",
RTE_MAX_ETHPORTS);
}
+
+ /* Initialize ports NUMA structures */
+ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
static void
diff --git a/app/test-pmd/tm.c b/app/test-pmd/tm.c
index dd837cb8..d3ba8650 100644
--- a/app/test-pmd/tm.c
+++ b/app/test-pmd/tm.c
@@ -616,6 +616,7 @@ softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
error->message,
shaper_profile_id);
+ free(tnp.shared_shaper_id);
return -1;
}
tnp.shaper_profile_id = shaper_profile_id;
@@ -631,6 +632,7 @@ softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
error->message,
h->tc_node_id[pos][k]);
+ free(tnp.shared_shaper_id);
return -1;
}
shaper_profile_id++;
diff --git a/config/common_base b/config/common_base
index 214d9a27..7b479229 100644
--- a/config/common_base
+++ b/config/common_base
@@ -105,6 +105,7 @@ CONFIG_RTE_EAL_IGB_UIO=n
CONFIG_RTE_EAL_VFIO=n
CONFIG_RTE_MALLOC_DEBUG=n
CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES=n
+CONFIG_RTE_USE_LIBBSD=n
#
# Recognize/ignore the AVX/AVX512 CPU flags for performance/power testing.
@@ -233,7 +234,8 @@ CONFIG_RTE_LIBRTE_MLX4_DEBUG=n
CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8
#
-# Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD
+# Compile burst-oriented Mellanox ConnectX-4, ConnectX-5 & Bluefield
+# (MLX5) PMD
#
CONFIG_RTE_LIBRTE_MLX5_PMD=n
CONFIG_RTE_LIBRTE_MLX5_DEBUG=n
diff --git a/devtools/check-git-log.sh b/devtools/check-git-log.sh
index 910daba4..9014c824 100755
--- a/devtools/check-git-log.sh
+++ b/devtools/check-git-log.sh
@@ -106,8 +106,8 @@ bad=$(echo "$headlines" | grep --color=always \
# check headline lowercase for first words
bad=$(echo "$headlines" | grep --color=always \
- -e '^.*[A-Z].*:' \
- -e ': *[A-Z]' \
+ -e '^.*[[:upper:]].*:' \
+ -e ': *[[:upper:]]' \
| sed 's,^,\t,')
[ -z "$bad" ] || printf "Wrong headline uppercase:\n$bad\n"
diff --git a/devtools/cocci/strlcpy.cocci b/devtools/cocci/strlcpy.cocci
new file mode 100644
index 00000000..335e2712
--- /dev/null
+++ b/devtools/cocci/strlcpy.cocci
@@ -0,0 +1,8 @@
+@use_strlcpy@
+identifier src, dst;
+expression size;
+@@
+(
+- snprintf(dst, size, "%s", src)
++ strlcpy(dst, src, size)
+)
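For reference, a hand-written sketch of what the Coccinelle rule above does to a typical call site; the helper and its arguments are illustrative only, and it assumes the strlcpy support added to rte_string_fns.h in this release:

#include <stddef.h>
#include <rte_string_fns.h>

static void
copy_driver_name(char *dst, size_t size, const char *src)
{
	/* before the rewrite: snprintf(dst, size, "%s", src); */
	strlcpy(dst, src, size);
}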
diff --git a/doc/guides/contributing/documentation.rst b/doc/guides/contributing/documentation.rst
index 170dacdb..20a2c482 100644
--- a/doc/guides/contributing/documentation.rst
+++ b/doc/guides/contributing/documentation.rst
@@ -80,7 +80,7 @@ added to by the developer.
* **API documentation**
The API documentation explains how to use the public DPDK functions.
- The `API index page <http://dpdk.org/doc/api/>`_ shows the generated API documentation with related groups of functions.
+ The `API index page <http://doc.dpdk.org/api/>`_ shows the generated API documentation with related groups of functions.
The API documentation should be updated via Doxygen comments when new functions are added.
@@ -655,7 +655,7 @@ The following are some guidelines for use of Doxygen in the DPDK API documentati
*/
In the API documentation the functions will be rendered as links, see the
- `online section of the rte_ethdev.h docs <http://dpdk.org/doc/api/rte__ethdev_8h.html>`_ that contains the above text.
+ `online section of the rte_ethdev.h docs <http://doc.dpdk.org/api/rte__ethdev_8h.html>`_ that contains the above text.
* The ``@see`` keyword can be used to create a *see also* link to another file or library.
This directive should be placed on one line at the bottom of the documentation section.
diff --git a/doc/guides/contributing/patches.rst b/doc/guides/contributing/patches.rst
index 40983c15..daa4edad 100644
--- a/doc/guides/contributing/patches.rst
+++ b/doc/guides/contributing/patches.rst
@@ -25,9 +25,9 @@ The DPDK development process has the following features:
* All sub-repositories are merged into main repository for ``-rc1`` and ``-rc2`` versions of the release.
* After the ``-rc2`` release all patches should target the main repository.
-The mailing list for DPDK development is `dev@dpdk.org <http://dpdk.org/ml/archives/dev/>`_.
-Contributors will need to `register for the mailing list <http://dpdk.org/ml/listinfo/dev>`_ in order to submit patches.
-It is also worth registering for the DPDK `Patchwork <http://dpdk.org/dev/patchwork/project/dpdk/list/>`_
+The mailing list for DPDK development is `dev@dpdk.org <http://mails.dpdk.org/archives/dev/>`_.
+Contributors will need to `register for the mailing list <http://mails.dpdk.org/listinfo/dev>`_ in order to submit patches.
+It is also worth registering for the DPDK `Patchwork <http://patches.dpdk.org/project/dpdk/list/>`_
The development process requires some familiarity with the ``git`` version control system.
Refer to the `Pro Git Book <http://www.git-scm.com/book/>`_ for further information.
@@ -104,7 +104,7 @@ main repository::
git clone git://dpdk.org/dpdk
git clone http://dpdk.org/git/dpdk
-sub-repositories (`list <http://dpdk.org/browse/next>`_)::
+sub-repositories (`list <http://git.dpdk.org/next>`_)::
git clone git://dpdk.org/next/dpdk-next-*
git clone http://dpdk.org/git/next/dpdk-next-*
@@ -418,7 +418,7 @@ If the patch is in relation to a previous email thread you can add it to the sam
git send-email --to dev@dpdk.org --in-reply-to <1234-foo@bar.com> 000*.patch
The Message ID can be found in the raw text of emails or at the top of each Patchwork patch,
-`for example <http://dpdk.org/dev/patchwork/patch/7646/>`_.
+`for example <http://patches.dpdk.org/patch/7646/>`_.
Shallow threading (``--thread --no-chain-reply-to``) is preferred for a patch series.
Once submitted your patches will appear on the mailing list and in Patchwork.
diff --git a/doc/guides/contributing/stable.rst b/doc/guides/contributing/stable.rst
index 0f2f1f37..326381d2 100644
--- a/doc/guides/contributing/stable.rst
+++ b/doc/guides/contributing/stable.rst
@@ -75,7 +75,7 @@ The Stable and LTS release are coordinated on the stable@dpdk.org mailing
list.
All fix patches to the master branch that are candidates for backporting
-should also be CCed to the `stable@dpdk.org <http://dpdk.org/ml/listinfo/stable>`_
+should also be CCed to the `stable@dpdk.org <http://mails.dpdk.org/listinfo/stable>`_
mailing list.
@@ -86,10 +86,10 @@ A Stable Release will be released by:
* Tagging the release with YY.MM.n (year, month, number).
* Uploading a tarball of the release to dpdk.org.
-* Sending an announcement to the `announce@dpdk.org <http://dpdk.org/ml/listinfo/announce>`_
+* Sending an announcement to the `announce@dpdk.org <http://mails.dpdk.org/listinfo/announce>`_
list.
-Stable releases are available on the `dpdk.org download page <http://dpdk.org/download>`_.
+Stable releases are available on the `dpdk.org download page <http://core.dpdk.org/download/>`_.
ABI
diff --git a/doc/guides/cryptodevs/features/qat.ini b/doc/guides/cryptodevs/features/qat.ini
index 40da8985..eeaf7de4 100644
--- a/doc/guides/cryptodevs/features/qat.ini
+++ b/doc/guides/cryptodevs/features/qat.ini
@@ -51,3 +51,6 @@ ZUC EIA3 = Y
AES GCM (128) = Y
AES GCM (192) = Y
AES GCM (256) = Y
+AES CCM (128) = Y
+AES CCM (192) = Y
+AES CCM (256) = Y
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index 581cd2f7..8418115b 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -80,6 +80,7 @@ Hash algorithms:
Supported AEAD algorithms:
* ``RTE_CRYPTO_AEAD_AES_GCM``
+* ``RTE_CRYPTO_AEAD_AES_CCM``
Limitations
diff --git a/doc/guides/freebsd_gsg/install_from_ports.rst b/doc/guides/freebsd_gsg/install_from_ports.rst
index 67e63d27..178f710a 100644
--- a/doc/guides/freebsd_gsg/install_from_ports.rst
+++ b/doc/guides/freebsd_gsg/install_from_ports.rst
@@ -89,7 +89,7 @@ environmental variables should be set as below:
.. note::
To install a copy of the DPDK compiled using gcc, please download the
- official DPDK package from http://dpdk.org/ and install manually using
+ official DPDK package from http://core.dpdk.org/download/ and install manually using
the instructions given in the next chapter, :ref:`building_from_source`
An example application can therefore be copied to a user's home directory and
diff --git a/doc/guides/howto/flow_bifurcation.rst b/doc/guides/howto/flow_bifurcation.rst
index 61016a4f..3fbf3792 100644
--- a/doc/guides/howto/flow_bifurcation.rst
+++ b/doc/guides/howto/flow_bifurcation.rst
@@ -296,4 +296,4 @@ The typical procedure to achieve this is as follows:
'not involved', while ``00`` or no mask means 'involved'.
* For more details of the configuration, refer to the
- `cloud filter test plan <http://dpdk.org/browse/tools/dts/tree/test_plans/cloud_filter_test_plan.rst>`_
+ `cloud filter test plan <http://git.dpdk.org/tools/dts/tree/test_plans/cloud_filter_test_plan.rst>`_
diff --git a/doc/guides/howto/rte_flow.rst b/doc/guides/howto/rte_flow.rst
index 4a27c995..cc55413f 100644
--- a/doc/guides/howto/rte_flow.rst
+++ b/doc/guides/howto/rte_flow.rst
@@ -60,10 +60,10 @@ Code
.. code-block:: c
/* create the attribute structure */
- struct rte_flow_attr attr = {.ingress = 1};
+ struct rte_flow_attr attr = { .ingress = 1 };
struct rte_flow_item pattern[MAX_PATTERN_IN_FLOW];
struct rte_flow_action actions[MAX_ACTIONS_IN_FLOW];
- struct rte_flow_item_etc eth;
+ struct rte_flow_item_eth eth;
struct rte_flow_item_vlan vlan;
struct rte_flow_item_ipv4 ipv4;
struct rte_flow *flow;
@@ -83,15 +83,15 @@ Code
pattern[2].spec = &ipv4;
/* end the pattern array */
- pattern[3].type = RTE_FLOW_ITEM)TYPE_END;
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
/* create the drop action */
actions[0].type = RTE_FLOW_ACTION_TYPE_DROP;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
/* validate and create the flow rule */
- if (!rte_flow_validate(port_id, &attr, pattern, actions, &error)
- flow = rte_flow_create(port_id, &attr, pattern, actions, &error)
+ if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
+ flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
Output
~~~~~~
@@ -148,7 +148,7 @@ clarity)::
tpmd> flow create 0 ingress pattern eth / vlan /
ipv4 dst spec 192.168.3.0 dst mask 255.255.255.0 /
- end actions drop / end
+ end actions drop / end
Code
~~~~
@@ -158,7 +158,7 @@ Code
struct rte_flow_attr attr = {.ingress = 1};
struct rte_flow_item pattern[MAX_PATTERN_IN_FLOW];
struct rte_flow_action actions[MAX_ACTIONS_IN_FLOW];
- struct rte_flow_item_etc eth;
+ struct rte_flow_item_eth eth;
struct rte_flow_item_vlan vlan;
struct rte_flow_item_ipv4 ipv4;
struct rte_flow_item_ipv4 ipv4_mask;
@@ -181,15 +181,15 @@ Code
pattern[2].mask = &ipv4_mask;
/* end the pattern array */
- pattern[3].type = RTE_FLOW_ITEM)TYPE_END;
+ pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
/* create the drop action */
actions[0].type = RTE_FLOW_ACTION_TYPE_DROP;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
/* validate and create the flow rule */
- if (!rte_flow_validate(port_id, &attr, pattern, actions, &error)
- flow = rte_flow_create(port_id, &attr, pattern, actions, &error)
+ if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
+ flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
Output
~~~~~~
@@ -255,10 +255,10 @@ Code
.. code-block:: c
- struct rte_flow_attr attr = {.ingress = 1};
+ struct rte_flow_attr attr = { .ingress = 1 };
struct rte_flow_item pattern[MAX_PATTERN_IN_FLOW];
struct rte_flow_action actions[MAX_ACTIONS_IN_FLOW];
- struct rte_flow_item_etc eth;
+ struct rte_flow_item_eth eth;
struct rte_flow_item_vlan vlan;
struct rte_flow_action_queue queue = { .index = 3 };
struct rte_flow *flow;
@@ -274,16 +274,16 @@ Code
pattern[1].spec = &vlan;
/* end the pattern array */
- pattern[2].type = RTE_FLOW_ITEM)TYPE_END;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
/* create the drop action */
actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
- actions[0].conf = &queue
+ actions[0].conf = &queue;
actions[1].type = RTE_FLOW_ACTION_TYPE_END;
/* validate and create the flow rule */
- if (!rte_flow_validate(port_id, &attr, pattern, actions, &error)
- flow = rte_flow_create(port_id, &attr, pattern, actions, &error)
+ if (!rte_flow_validate(port_id, &attr, pattern, actions, &error))
+ flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
Output
~~~~~~
diff --git a/doc/guides/linux_gsg/nic_perf_intel_platform.rst b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
index febd7337..2ef6ed7c 100644
--- a/doc/guides/linux_gsg/nic_perf_intel_platform.rst
+++ b/doc/guides/linux_gsg/nic_perf_intel_platform.rst
@@ -64,7 +64,7 @@ This aligns with the previous output which showed that each channel has one memo
Network Interface Card Requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Use a `DPDK supported <http://dpdk.org/doc/nics>`_ high end NIC such as the Intel XL710 40GbE.
+Use a `DPDK supported <http://core.dpdk.org/supported/>`_ high end NIC such as the Intel XL710 40GbE.
Make sure each NIC has been flashed the latest version of NVM/firmware.
diff --git a/doc/guides/linux_gsg/sys_reqs.rst b/doc/guides/linux_gsg/sys_reqs.rst
index 3e7fe637..7a91b31c 100644
--- a/doc/guides/linux_gsg/sys_reqs.rst
+++ b/doc/guides/linux_gsg/sys_reqs.rst
@@ -91,7 +91,11 @@ Compilation of the DPDK
x86_x32 ABI is currently supported with distribution packages only on Ubuntu
higher than 13.10 or recent Debian distribution. The only supported compiler is gcc 4.9+.
-* libnuma-devel - library for handling NUMA (Non Uniform Memory Access).
+* Library for handling NUMA (Non Uniform Memory Access).
+
+ * numactl-devel in Red Hat/Fedora;
+
+ * libnuma-dev in Debian/Ubuntu;
* Python, version 2.7+ or 3.2+, to use various helper scripts included in the DPDK package.
diff --git a/doc/guides/nics/ena.rst b/doc/guides/nics/ena.rst
index d19912e9..695960d4 100644
--- a/doc/guides/nics/ena.rst
+++ b/doc/guides/nics/ena.rst
@@ -187,11 +187,20 @@ Prerequisites
-------------
#. Prepare the system as recommended by DPDK suite. This includes environment
- variables, hugepages configuration, tool-chains and configuration
+ variables, hugepages configuration, tool-chains and configuration.
-#. Insert igb_uio kernel module using the command 'modprobe igb_uio'
+#. ENA PMD can operate with ``vfio-pci`` or ``igb_uio`` driver.
-#. Bind the intended ENA device to igb_uio module
+#. Insert ``vfio-pci`` or ``igb_uio`` kernel module using the command
+ ``modprobe vfio-pci`` or ``modprobe igb_uio`` respectively.
+
+#. For ``vfio-pci`` users only:
+ Please make sure that ``IOMMU`` is enabled in your system,
+ or use ``vfio`` driver in ``noiommu`` mode::
+
+ echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
+
+#. Bind the intended ENA device to ``vfio-pci`` or ``igb_uio`` module.
At this point the system should be ready to run DPDK applications. Once the
diff --git a/doc/guides/nics/enic.rst b/doc/guides/nics/enic.rst
index a11627a0..94ec6fb4 100644
--- a/doc/guides/nics/enic.rst
+++ b/doc/guides/nics/enic.rst
@@ -39,7 +39,7 @@ How to obtain ENIC PMD integrated DPDK
--------------------------------------
ENIC PMD support is integrated into the DPDK suite. dpdk-<version>.tar.gz
-should be downloaded from http://dpdk.org
+should be downloaded from http://core.dpdk.org/download/
Configuration information
diff --git a/doc/guides/nics/features.rst b/doc/guides/nics/features.rst
index d5bf38a2..f31aee93 100644
--- a/doc/guides/nics/features.rst
+++ b/doc/guides/nics/features.rst
@@ -235,7 +235,7 @@ Supports TCP Segmentation Offloading.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_TCP_TSO``.
* **[uses] rte_eth_desc_lim**: ``nb_seg_max``, ``nb_mtu_seg_max``.
-* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_TCP_SEG``.
+* **[uses] mbuf**: ``mbuf.ol_flags:`` ``PKT_TX_TCP_SEG``, ``PKT_TX_IPV4``, ``PKT_TX_IPV6``, ``PKT_TX_IP_CKSUM``.
* **[uses] mbuf**: ``mbuf.tso_segsz``, ``mbuf.l2_len``, ``mbuf.l3_len``, ``mbuf.l4_len``.
* **[implements] datapath**: ``TSO functionality``.
* **[provides] rte_eth_dev_info**: ``tx_offload_capa,tx_queue_offload_capa:DEV_TX_OFFLOAD_TCP_TSO,DEV_TX_OFFLOAD_UDP_TSO``.
@@ -577,6 +577,7 @@ Supports L3 checksum offload.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_IPV4_CKSUM``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IP_CKSUM``,
``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``.
+* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_IP_CKSUM_UNKNOWN`` |
``PKT_RX_IP_CKSUM_BAD`` | ``PKT_RX_IP_CKSUM_GOOD`` |
``PKT_RX_IP_CKSUM_NONE``.
@@ -597,6 +598,7 @@ Supports L4 checksum offload.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
+* **[uses] mbuf**: ``mbuf.l2_len``, ``mbuf.l3_len``.
* **[provides] mbuf**: ``mbuf.ol_flags:PKT_RX_L4_CKSUM_UNKNOWN`` |
``PKT_RX_L4_CKSUM_BAD`` | ``PKT_RX_L4_CKSUM_GOOD`` |
``PKT_RX_L4_CKSUM_NONE``.
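The "[uses] mbuf" lines added above document that L3/L4 Tx checksum offload requires the application to set mbuf.l2_len and mbuf.l3_len. A minimal sketch of preparing an IPv4/UDP mbuf for hardware checksum offload under that prerequisite; the helper name and the fixed header sizes are assumptions for the example:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_udp.h>

static void
prepare_ipv4_udp_tx_cksum(struct rte_mbuf *m)
{
	struct ipv4_hdr *ip;
	struct udp_hdr *udp;

	/* The PMD needs the header lengths to locate the checksum fields. */
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;

	ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
	udp = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
			m->l2_len + m->l3_len);
	ip->hdr_checksum = 0;
	/* Hardware fills the final sum; seed with the pseudo-header sum. */
	udp->dgram_cksum = rte_ipv4_phdr_cksum(ip, m->ol_flags);
}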
diff --git a/doc/guides/nics/features/ena.ini b/doc/guides/nics/features/ena.ini
index 691c1e3d..aa6f05a7 100644
--- a/doc/guides/nics/features/ena.ini
+++ b/doc/guides/nics/features/ena.ini
@@ -23,5 +23,6 @@ Inner L4 checksum = Y
Basic stats = Y
Extended stats = Y
Linux UIO = Y
+Linux VFIO = Y
x86-32 = Y
x86-64 = Y
diff --git a/doc/guides/nics/features/failsafe.ini b/doc/guides/nics/features/failsafe.ini
index a42e344a..dc824107 100644
--- a/doc/guides/nics/features/failsafe.ini
+++ b/doc/guides/nics/features/failsafe.ini
@@ -11,7 +11,6 @@ Jumbo frame = Y
Promiscuous mode = Y
Allmulticast mode = Y
Unicast MAC filter = Y
-Multicast MAC filter = Y
VLAN filter = Y
Flow control = Y
Flow API = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 50fced3f..ee588233 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -32,9 +32,9 @@ MLX5 poll mode driver
=====================
The MLX5 poll mode driver library (**librte_pmd_mlx5**) provides support
-for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx** and **Mellanox
-ConnectX-5** families of 10/25/40/50/100 Gb/s adapters as well as their
-virtual functions (VF) in SR-IOV context.
+for **Mellanox ConnectX-4**, **Mellanox ConnectX-4 Lx** , **Mellanox
+ConnectX-5** and **Mellanox Bluefield** families of 10/25/40/50/100 Gb/s
+adapters as well as their virtual functions (VF) in SR-IOV context.
Information and documentation about these adapters can be found on the
`Mellanox website <http://www.mellanox.com>`__. Help is also provided by the
@@ -214,8 +214,8 @@ Run-time configuration
Supported on:
- - x86_64 with ConnectX-4, ConnectX-4 LX and ConnectX-5.
- - POWER8 and ARMv8 with ConnectX-4 LX and ConnectX-5.
+ - x86_64 with ConnectX-4, ConnectX-4 LX, ConnectX-5 and Bluefield.
+ - POWER8 and ARMv8 with ConnectX-4 LX, ConnectX-5 and Bluefield.
- ``txq_inline`` parameter [int]
@@ -234,34 +234,54 @@ Run-time configuration
This option should be used in combination with ``txq_inline`` above.
- On ConnectX-4, ConnectX-4 LX and ConnectX-5 without Enhanced MPW:
+ On ConnectX-4, ConnectX-4 LX, ConnectX-5 and Bluefield without
+ Enhanced MPW:
- Disabled by default.
- In case ``txq_inline`` is set recommendation is 4.
- On ConnectX-5 with Enhanced MPW:
+ On ConnectX-5 and Bluefield with Enhanced MPW:
- Set to 8 by default.
+- ``txqs_max_vec`` parameter [int]
+
+ Enable vectorized Tx only when the number of TX queues is less than or
+ equal to this value. Effective only when ``tx_vec_en`` is enabled.
+
+ On ConnectX-5:
+
+ - Set to 8 by default on ARMv8.
+ - Set to 4 by default otherwise.
+
+ On Bluefield
+
+ - Set to 16 by default.
+
- ``txq_mpw_en`` parameter [int]
A nonzero value enables multi-packet send (MPS) for ConnectX-4 Lx and
- enhanced multi-packet send (Enhanced MPS) for ConnectX-5. MPS allows the
- TX burst function to pack up multiple packets in a single descriptor
- session in order to save PCI bandwidth and improve performance at the
- cost of a slightly higher CPU usage. When ``txq_inline`` is set along
- with ``txq_mpw_en``, TX burst function tries to copy entire packet data
- on to TX descriptor instead of including pointer of packet only if there
- is enough room remained in the descriptor. ``txq_inline`` sets
- per-descriptor space for either pointers or inlined packets. In addition,
- Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
- in the same descriptor.
+ enhanced multi-packet send (Enhanced MPS) for ConnectX-5 and Bluefield.
+ MPS allows the TX burst function to pack up multiple packets in a
+ single descriptor session in order to save PCI bandwidth and improve
+ performance at the cost of a slightly higher CPU usage. When
+ ``txq_inline`` is set along with ``txq_mpw_en``, TX burst function tries
+ to copy entire packet data on to TX descriptor instead of including
+ pointer of packet only if there is enough room remained in the
+ descriptor. ``txq_inline`` sets per-descriptor space for either pointers
+ or inlined packets. In addition, Enhanced MPS supports hybrid mode -
+ mixing inlined packets and pointers in the same descriptor.
This option cannot be used in conjunction with ``tso`` below. When ``tso``
is set, ``txq_mpw_en`` is disabled.
- It is currently only supported on the ConnectX-4 Lx and ConnectX-5
- families of adapters. Enabled by default.
+ It is currently only supported on the ConnectX-4 Lx, ConnectX-5 and Bluefield
+ families of adapters.
+ On ConnectX-4 Lx the MPW is considered un-secure hence disabled by default.
+ Users which enable the MPW should be aware that application which provides incorrect
+ mbuf descriptors in the Tx burst can lead to serious errors in the host including, on some cases,
+ NIC to get stuck.
+ On ConnectX-5 and Bluefield the MPW is secure and enabled by default.
- ``txq_mpw_hdr_dseg_en`` parameter [int]
@@ -287,10 +307,10 @@ Run-time configuration
- ``tx_vec_en`` parameter [int]
- A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
- global Tx queues on the port is lesser than MLX5_VPMD_MIN_TXQS.
+ A nonzero value enables Tx vector on ConnectX-5 and Bluefield NICs if the number of
+ global Tx queues on the port is less than ``txqs_max_vec``.
- Enabled by default on ConnectX-5.
+ Enabled by default on ConnectX-5 and Bluefield.
- ``rx_vec_en`` parameter [int]
@@ -318,8 +338,9 @@ DPDK and must be installed separately:
- **libmlx5**
- Low-level user space driver library for Mellanox ConnectX-4/ConnectX-5
- devices, it is automatically loaded by libibverbs.
+ Low-level user space driver library for Mellanox
+ ConnectX-4/ConnectX-5/Bluefield devices, it is automatically loaded
+ by libibverbs.
This library basically implements send/receive calls to the hardware
queues.
@@ -333,15 +354,16 @@ DPDK and must be installed separately:
Unlike most other PMDs, these modules must remain loaded and bound to
their devices:
- - mlx5_core: hardware driver managing Mellanox ConnectX-4/ConnectX-5
- devices and related Ethernet kernel network devices.
+ - mlx5_core: hardware driver managing Mellanox
+ ConnectX-4/ConnectX-5/Bluefield devices and related Ethernet kernel
+ network devices.
- mlx5_ib: InifiniBand device driver.
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
- **Firmware update**
- Mellanox OFED releases include firmware updates for ConnectX-4/ConnectX-5
- adapters.
+ Mellanox OFED releases include firmware updates for
+ ConnectX-4/ConnectX-5/Bluefield adapters.
Because each release provides new features, these updates must be applied to
match the kernel modules and libraries they come with.
@@ -378,6 +400,7 @@ Mellanox OFED
- ConnectX-4 Lx: **14.21.1000** and above.
- ConnectX-5: **16.21.1000** and above.
- ConnectX-5 Ex: **16.21.1000** and above.
+ - Bluefield: **18.99.3950** and above.
While these libraries and kernel modules are available on OpenFabrics
Alliance's `website <https://www.openfabrics.org/>`__ and provided by package
@@ -589,7 +612,7 @@ Usage example
-------------
This section demonstrates how to launch **testpmd** with Mellanox
-ConnectX-4/ConnectX-5 devices managed by librte_pmd_mlx5.
+ConnectX-4/ConnectX-5/Bluefield devices managed by librte_pmd_mlx5.
#. Load the kernel modules:
diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst
index af82f86e..9d05cca0 100644
--- a/doc/guides/nics/virtio.rst
+++ b/doc/guides/nics/virtio.rst
@@ -74,7 +74,7 @@ In this release, the virtio PMD driver provides the basic functionality of packe
* The descriptor number for the Rx/Tx queue is hard-coded to be 256 by qemu 2.7 and below.
If given a different descriptor number by the upper application,
the virtio PMD generates a warning and fall back to the hard-coded value.
- Rx queue size can be configureable and up to 1024 since qemu 2.8 and above. Rx queue size is 256
+ Rx queue size can be configurable and up to 1024 since qemu 2.8 and above. Rx queue size is 256
by default. Tx queue size is still hard-coded to be 256.
* Features of mac/vlan filter are supported, negotiation with vhost/backend are needed to support them.
diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index e17fd2ca..740854f6 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -1462,3 +1462,227 @@ Fixes in 17.11 LTS Release
* vhost: improve dirty pages logging performance
* vhost: release locks on RARP packet failure
* vhost: retranslate vring addr when memory table changes
+
+17.11.5
+~~~~~~~
+
+* acl: forbid rule with priority zero
+* app/pdump: fix port id storage size
+* app/procinfo: fix sprintf overrun
+* app/test-crypto-perf: fix check for auth key
+* app/test-crypto-perf: fix check for cipher IV
+* app/test-crypto-perf: fix double allocation of memory
+* app/testpmd: check Rx VLAN offload flag to print VLAN TCI
+* app/testpmd: fix csum parse-tunnel command invocation
+* app/testpmd: fix duplicate exit
+* app/testpmd: fix L4 length for UDP checksum
+* app/testpmd: fix memory leak for TM object
+* app/testpmd: fix metering and policing commands
+* app/testpmd: fix physical port socket initialization
+* app/testpmd: fix printf format in event callback
+* app/testpmd: fix RED byte stats
+* app/testpmd: fix shaper profile parameters
+* app/testpmd: fix vdev socket initialization
+* app/testpmd: optimize mbuf pool allocation
+* app/testpmd: reserve NUMA node per port and per ring
+* build: enable ARM NEON flag when __aarch64__ defined
+* bus/dpaa: fix build with gcc 9.0
+* bus/dpaa: fix inconsistent struct alignment
+* bus/pci: compare kernel driver instead of interrupt handler
+* bus/pci: fix allocation of device path
+* bus/pci: fix config r/w access
+* bus/pci: replace strncpy by strlcpy
+* crypto/aesni_mb: fix possible array overrun
+* crypto/mvsam: update hash digest sizes
+* crypto/scheduler: fix build with gcc 8.2
+* devtools: provide more generic grep in git check
+* doc: add cross-compilation in sample apps guide
+* doc: add VFIO in ENA guide
+* doc: clarify L3 Tx checksum prerequisite
+* doc: clarify L4 Tx checksum prerequisite
+* doc: clarify TSO Tx offload prerequisite
+* doc: fix formatting in IP reassembly app guide
+* doc: fix missing CCM to QAT feature list
+* doc: fix NUMA library name in Linux guide
+* doc: fix spelling in PMD guides
+* doc: fix style and syntax in flow API guide
+* doc: fix typo in testpmd guide
+* doc: fix typos in the flow API guide
+* doc: fix wrong usage of bind command
+* eal/arm64: fix intrinsic for GCC < 4.9
+* eal: declare trace buffer at top of own block
+* eal: explicit cast in constant byte swap
+* eal: explicit cast in rwlock functions
+* eal: explicit cast of builtin for bsf32
+* eal: explicit cast of core id when getting index
+* eal: explicit cast of strlcpy return
+* eal: fix build
+* eal: fix build with gcc 9.0
+* eal: fix build with -O1
+* eal: fix casts in random functions
+* eal: introduce rte version of fls
+* eal/linux: fix memory leak of logid
+* eal/linux: handle UIO read failure in interrupt handler
+* eal: support strlcpy function
+* eal: use correct data type for bitmap slab operations
+* eal/x86: fix type of variable in memcpy function
+* eal/x86: remove unused memcpy file
+* efd: fix write unlock during ring creation
+* ethdev: explicit cast of buffered Tx number
+* ethdev: explicit cast of queue count return
+* ethdev: fix doxygen comment to be with structure
+* ethdev: fix queue start and stop
+* ethdev: fix type and scope of variables in Rx burst
+* eventdev: fix eth Rx adapter hotplug incompatibility
+* eventdev: fix unlock in Rx adapter
+* event/sw: fix cq index check for unlink usecases
+* examples/flow_filtering: remove VLAN item
+* examples/ipv4_multicast: fix leak of cloned packets
+* examples/vhost: remove unnecessary constant
+* fix dpdk.org URLs
+* gro: fix overflow of TCP payload calculation
+* hash: explicit casts for truncation in CRC32c
+* hash: move stack declaration at top of CRC32c function
+* igb_uio: fix refcount if open returns error
+* ip_frag: fix overflow in key comparison
+* ip_frag: use key length for key comparison
+* kni: fix build on Linux < 3.14
+* kni: fix build on Linux 4.19
+* kni: fix kernel FIFO synchronization
+* kni: fix possible uninitialized variable
+* kvargs: fix processing a null list
+* latency: fix timestamp marking and latency calculation
+* mbuf: avoid implicit demotion in 64-bit arithmetic
+* mbuf: avoid integer promotion in prepend/adj/chain
+* mbuf: explicit cast of headroom on reset
+* mbuf: explicit cast of size on detach
+* mbuf: explicit casts of reference counter
+* mbuf: fix reference counter integer promotion
+* mbuf: fix Tx offload mask
+* mbuf: fix type of private size in detach
+* mbuf: fix type of variables in linearize function
+* mem: fix memory initialization time
+* mem: fix undefined behavior in NUMA-aware mapping
+* mk: disable gcc AVX512F support
+* net/bnx2x: fix call to link handling periodic function
+* net/bnx2x: fix logging to include device name
+* net/bnx2x: fix to add PHY lock
+* net/bnx2x: fix to disable further interrupts
+* net/bnx2x: fix VF link state update
+* net/bnxt: fix registration of VF async event completion ring
+* net/bnxt: fix uninitialized pointer access in Tx
+* net/bnxt: set MAC filtering as outer for non tunnel frames
+* net/bnxt: set VLAN strip mode before default VNIC cfg
+* net/bonding: do not ignore RSS key on device config
+* net/bonding: fix crash when stopping mode 4 port
+* net/bonding: fix possible silent failure in config
+* net/bonding: fix Rx slave fairness
+* net/bonding: stop and deactivate slaves on stop
+* net/bonding: support matching QinQ ethertype
+* net/bonding: use evenly distributed default RSS RETA
+* net/e1000/base: fix uninitialized variable
+* net/e1000: do not error out if Rx drop enable is set
+* net/ena: fix passing RSS hash to mbuf
+* net/enic: do not use non-standard integer types
+* net/enic: fix flow API memory leak
+* net/enic: set Rx VLAN offload flag for non-stripped packets
+* net: explicit cast in L4 checksum
+* net: explicit cast of IP checksum to 16-bit
+* net: explicit cast of multicast bit clearing
+* net: explicit cast of protocol in IPv6 checksum
+* net/failsafe: add checks for deferred queue setup
+* net/failsafe: fix crash on slave queue release
+* net/failsafe: remove not supported multicast MAC filter
+* net: fix build with pedantic
+* net: fix Intel prepare function for IP checksum offload
+* net/i40e/base: correct global reset timeout calculation
+* net/i40e/base: fix comment referencing internal data
+* net/i40e/base: gracefully clean the resources
+* net/i40e/base: properly clean resources
+* net/i40e: enable loopback function for X722 MAC
+* net/i40e: fix link status update
+* net/i40e: fix offload not supported mask
+* net/i40e: fix send admin queue command before init
+* net/i40e: fix X710 Rx after reading some registers
+* net/i40e: keep promiscuous on if allmulticast is enabled
+* net/i40e: update Tx offload mask
+* net/igb: update Tx offload mask
+* net/ixgbe: fix busy polling while fiber link update
+* net/ixgbe: fix maximum wait time in comment
+* net/ixgbe: stop link setup alarm handler before device start
+* net/ixgbe: update Tx offload mask
+* net/ixgbevf: fix link state
+* net/ixgbe: wait longer for link after fiber MAC setup
+* net/mlx4: fix possible uninitialized variable
+* net/mlx5: add Bluefield device id
+* net/mlx5: disable ConnectX-4 Lx Multi Packet Send by default
+* net/mlx5: fix initialization of struct members
+* net/mlx5: fix interrupt completion queue index wrapping
+* net/mlx5: fix multi-chunk mempool support
+* net/mlx5: make vectorized Tx threshold configurable
+* net/mlx5: optimize Rx buffer replenishment threshold
+* net: move stack variable at top of VLAN strip function
+* net/mvpp2: fix array initialization
+* net/nfp: fix mbuf flags with checksum good
+* net/nfp: fix memcpy out of source range
+* net/nfp: fix misuse of strlcpy
+* net/nfp: fix RSS
+* net/nfp: replace strncpy by strlcpy
+* net/octeontx: fix failures when available ports > queues
+* net/octeontx: fix mbuf corruption with large private sizes
+* net/octeontx: fix packet corruption on Tx
+* net/qede/base: fix MFW FLR flow
+* net/qede/base: fix to handle stag update event
+* net/qede: fix crash when configure fails
+* net/qede: fix ethernet type in HW registers
+* net/qede: fix flow director for IPv6 filter
+* net/qede: fix Rx buffer size calculation
+* net/qede: fix strncpy
+* net/qede: fix Tx offload mask
+* net/qede: fix Tx tunnel offload support mask
+* net/qede: replace strncpy by strlcpy
+* net/sfc: allow to query RSS key and HF in isolated mode
+* net/sfc: allow to query RSS key and HF when RSS is disabled
+* net/sfc/base: avoid usage of too big arrays on stack
+* net/sfc/base: check size of memory to read sensors data to
+* net/sfc/base: fix a typo in unicast filter insertion comment
+* net/sfc/base: fix build because of no declaration
+* net/sfc/base: fix ID retrieval in v3 licensing
+* net/sfc/base: fix MAC Tx stats for less or equal to 64 bytes
+* net/sfc/base: fix out of bounds read when dereferencing sdup
+* net/sfc/base: fix PreFAST warnings because of unused return
+* net/sfc/base: fix SAL annotation for input buffers
+* net/sfc/base: make last byte of module information available
+* net/sfc/base: prevent access to the NIC config before probe
+* net/sfc/base: properly align on line continuation
+* net/sfc/base: remove Falcon-specific concurrency check
+* net/sfc: do not skip RSS configuration step on reconfigure
+* net/sfc: fix an Rx queue double release possibility
+* net/sfc: fix a Tx queue double release possibility
+* net/sfc: make sure that stats name is nul-terminated
+* net/sfc: receive prepared packets even in Rx exception case
+* net/softnic: fix undefined dev info fields
+* net/tap: fix file descriptor check
+* net/thunderx: fix Tx desc corruption in scatter-gather mode
+* net/vhost: fix parameters string
+* net/virtio: fix PCI config error handling
+* net/virtio: fix unchecked return value
+* net/virtio: register/unregister intr handler on start/stop
+* net/virtio-user: check negotiated features before set
+* net/virtio-user: do not reset owner when driver resets
+* net/virtio-user: fix typo in error message
+* pci: fix parsing of address without function number
+* ring: remove signed type flip-flopping
+* ring: remove useless variables
+* spinlock/x86: move stack declaration before code
+* table: fix casting cuckoo hash function
+* test/crypto: fix number of queue pairs
+* test/event: check burst mode capability
+* test/hash: fix bucket size in perf test
+* test/hash: fix build
+* test: release ring resources after PMD perf test
+* test/reorder: fix out of bound access
+* usertools: check for lspci dependency
+* vfio: do not needlessly setup device in secondary process
+* vhost: fix corner case for enqueue operation
+* vhost: remove unneeded null pointer check
diff --git a/doc/guides/sample_app_ug/compiling.rst b/doc/guides/sample_app_ug/compiling.rst
index 8bedaa79..3fe71311 100644
--- a/doc/guides/sample_app_ug/compiling.rst
+++ b/doc/guides/sample_app_ug/compiling.rst
@@ -36,7 +36,6 @@ This section explains how to compile the DPDK sample applications.
To compile all the sample applications
--------------------------------------
-
Set the path to DPDK source code if it's not set:
.. code-block:: console
@@ -120,3 +119,17 @@ Build the application:
export RTE_TARGET=build
make
+
+To cross compile the sample application(s)
+------------------------------------------
+
+To cross compile the sample application(s), append 'CROSS=$(CROSS_COMPILER_PREFIX)' to the 'make' command.
+For example, to cross compile for AArch64:
+
+ .. code-block:: console
+
+ export RTE_TARGET=build
+ export RTE_SDK=/path/to/rte_sdk
+ make -C examples CROSS=aarch64-linux-gnu-
+ or
+ make CROSS=aarch64-linux-gnu-
diff --git a/doc/guides/sample_app_ug/ip_reassembly.rst b/doc/guides/sample_app_ug/ip_reassembly.rst
index 7dc80d0b..e5b8ef70 100644
--- a/doc/guides/sample_app_ug/ip_reassembly.rst
+++ b/doc/guides/sample_app_ug/ip_reassembly.rst
@@ -50,8 +50,8 @@ There are two key differences from the L2 Forwarding sample application:
* The second difference is that the application differentiates between IP and non-IP traffic by means of offload flags.
-The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number, associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.Compiling the Application
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+The Longest Prefix Match (LPM for IPv4, LPM6 for IPv6) table is used to store/lookup an outgoing port number,
+associated with that IPv4 address. Any unmatched packets are forwarded to the originating port.
Compiling the Application
diff --git a/doc/guides/sample_app_ug/ipv4_multicast.rst b/doc/guides/sample_app_ug/ipv4_multicast.rst
index 7a8e7ebc..cca53096 100644
--- a/doc/guides/sample_app_ug/ipv4_multicast.rst
+++ b/doc/guides/sample_app_ug/ipv4_multicast.rst
@@ -346,7 +346,6 @@ It is the mcast_out_pkt() function that performs the packet duplication (either
hdr->pkt.in_port = pkt->pkt.in_port;
hdr->pkt.vlan_macip = pkt->pkt.vlan_macip;
hdr->pkt.hash = pkt->pkt.hash;
- hdr->ol_flags = pkt->ol_flags;
rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
return hdr;
diff --git a/doc/guides/sample_app_ug/vhost.rst b/doc/guides/sample_app_ug/vhost.rst
index 5735adbb..99e1a2d8 100644
--- a/doc/guides/sample_app_ug/vhost.rst
+++ b/doc/guides/sample_app_ug/vhost.rst
@@ -107,7 +107,7 @@ could be done by:
.. code-block:: console
modprobe uio_pci_generic
- $RTE_SDK/usertools/dpdk-devbind.py -b=uio_pci_generic 0000:00:04.0
+ $RTE_SDK/usertools/dpdk-devbind.py -b uio_pci_generic 0000:00:04.0
Then start testpmd for packet forwarding testing.
diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst
index 4c0d2ced..e5028137 100644
--- a/doc/guides/testpmd_app_ug/run_app.rst
+++ b/doc/guides/testpmd_app_ug/run_app.rst
@@ -355,7 +355,7 @@ The commandline options are:
io (the default)
mac
- mac_swap
+ macswap
flowgen
rxonly
txonly
diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
index c9ce85c9..3d7478e5 100644
--- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst
+++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst
@@ -2122,13 +2122,16 @@ Add port traffic management private shaper profile
Add the port traffic management private shaper profile::
testpmd> add port tm node shaper profile (port_id) (shaper_profile_id) \
- (tb_rate) (tb_size) (packet_length_adjust)
+ (cmit_tb_rate) (cmit_tb_size) (peak_tb_rate) (peak_tb_size) \
+ (packet_length_adjust)
where:
* ``shaper_profile_id``: Shaper profile ID for the new profile.
-* ``tb_rate``: Token bucket rate (bytes per second).
-* ``tb_size``: Token bucket size (bytes).
+* ``cmit_tb_rate``: Committed token bucket rate (bytes per second).
+* ``cmit_tb_size``: Committed token bucket size (bytes).
+* ``peak_tb_rate``: Peak token bucket rate (bytes per second).
+* ``peak_tb_size``: Peak token bucket size (bytes).
* ``packet_length_adjust``: The value (bytes) to be added to the length of
each packet for the purpose of shaping. This parameter value can be used to
correct the packet length with the framing overhead bytes that are consumed
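The committed/peak pairs added above correspond to the two token buckets of the generic traffic management API that testpmd drives. A hedged sketch of the same kind of profile created directly through librte_tm, assuming the rte_tm_shaper_params layout of this release (all values are illustrative):

.. code-block:: c

    #include <rte_tm.h>

    static int
    add_two_rate_profile(uint16_t port_id, uint32_t profile_id)
    {
        struct rte_tm_error error;
        struct rte_tm_shaper_params params = {
            .committed = { .rate = 1000000, .size = 4096 }, /* cmit_tb_rate/size */
            .peak      = { .rate = 2000000, .size = 4096 }, /* peak_tb_rate/size */
            .pkt_length_adjust = 24, /* e.g. preamble + IFG + FCS framing bytes */
        };

        return rte_tm_shaper_profile_add(port_id, profile_id, &params, &error);
    }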
diff --git a/drivers/bus/dpaa/base/fman/fman.c b/drivers/bus/dpaa/base/fman/fman.c
index a9c88ddc..4403ff5d 100644
--- a/drivers/bus/dpaa/base/fman/fman.c
+++ b/drivers/bus/dpaa/base/fman/fman.c
@@ -46,6 +46,7 @@
#include <fman.h>
#include <of.h>
#include <rte_dpaa_logs.h>
+#include <rte_string_fns.h>
#define QMI_PORT_REGS_OFFSET 0x400
@@ -216,7 +217,7 @@ fman_if_init(const struct device_node *dpa_node)
}
memset(__if, 0, sizeof(*__if));
INIT_LIST_HEAD(&__if->__if.bpool_list);
- strncpy(__if->node_path, dpa_node->full_name, PATH_MAX - 1);
+ strlcpy(__if->node_path, dpa_node->full_name, PATH_MAX - 1);
__if->node_path[PATH_MAX - 1] = '\0';
/* Obtain the MAC node used by this interface except macless */
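Several hunks in this release replace strncpy() with strlcpy() (pulled in via rte_string_fns.h). The property the fixes rely on is that strlcpy() always NUL-terminates and reports the source length, so truncation is detectable. A small illustrative sketch, not taken from the patch:

.. code-block:: c

    #include <string.h>
    #include <rte_string_fns.h> /* provides strlcpy when the libc does not */

    static void
    copy_driver_name(char *dst, size_t len)
    {
        const char *name = "uio_pci_generic"; /* may be longer than len */

        /* Unlike strncpy, strlcpy writes at most len - 1 characters, always
         * terminates the buffer and returns strlen(name). */
        if (strlcpy(dst, name, len) >= len)
            dst[0] = '\0'; /* or otherwise handle the truncation */
    }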
diff --git a/drivers/bus/dpaa/base/qbman/qman.c b/drivers/bus/dpaa/base/qbman/qman.c
index b8511103..84f0cb0b 100644
--- a/drivers/bus/dpaa/base/qbman/qman.c
+++ b/drivers/bus/dpaa/base/qbman/qman.c
@@ -344,9 +344,9 @@ loop:
if (!msg)
return 0;
}
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
/* We aren't draining anything but FQRNIs */
- pr_err("Found verb 0x%x in MR\n", msg->verb);
+ pr_err("Found verb 0x%x in MR\n", msg->ern.verb);
return -1;
}
qm_mr_next(p);
@@ -513,7 +513,7 @@ static inline void qm_mr_pvb_update(struct qm_portal *portal)
/* when accessing 'verb', use __raw_readb() to ensure that compiler
* inlining doesn't try to optimise out "excess reads".
*/
- if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ if ((__raw_readb(&res->ern.verb) & QM_MR_VERB_VBIT) == mr->vbit) {
mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
if (!mr->pi)
mr->vbit ^= QM_MR_VERB_VBIT;
@@ -810,7 +810,7 @@ mr_loop:
goto mr_done;
swapped_msg = *msg;
hw_fd_to_cpu(&swapped_msg.ern.fd);
- verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ verb = msg->ern.verb & QM_MR_VERB_TYPE_MASK;
/* The message is a software ERN iff the 0x20 bit is set */
if (verb & 0x20) {
switch (verb) {
@@ -1504,7 +1504,7 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags)
*/
struct qm_mr_entry msg;
- msg.verb = QM_MR_VERB_FQRNI;
+ msg.ern.verb = QM_MR_VERB_FQRNI;
msg.fq.fqs = mcr->alterfq.fqs;
msg.fq.fqid = fq->fqid;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
@@ -2389,7 +2389,7 @@ int qman_shutdown_fq(u32 fqid)
qm_mr_pvb_update(low_p);
msg = qm_mr_current(low_p);
while (msg) {
- if ((msg->verb &
+ if ((msg->ern.verb &
QM_MR_VERB_TYPE_MASK)
== QM_MR_VERB_FQRN)
found_fqrn = 1;
@@ -2457,7 +2457,7 @@ int qman_shutdown_fq(u32 fqid)
qm_mr_pvb_update(low_p);
msg = qm_mr_current(low_p);
while (msg) {
- if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+ if ((msg->ern.verb & QM_MR_VERB_TYPE_MASK) ==
QM_MR_VERB_FQRL)
orl_empty = 1;
qm_mr_next(low_p);
diff --git a/drivers/bus/dpaa/include/fsl_qman.h b/drivers/bus/dpaa/include/fsl_qman.h
index 72556dc1..5d00bc73 100644
--- a/drivers/bus/dpaa/include/fsl_qman.h
+++ b/drivers/bus/dpaa/include/fsl_qman.h
@@ -316,20 +316,20 @@ static inline dma_addr_t qm_sg_addr(const struct qm_sg_entry *sg)
} while (0)
/* See 1.5.8.1: "Enqueue Command" */
-struct qm_eqcr_entry {
+struct __rte_aligned(8) qm_eqcr_entry {
u8 __dont_write_directly__verb;
u8 dca;
u16 seqnum;
u32 orp; /* 24-bit */
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved3[32];
} __packed;
/* "Frame Dequeue Response" */
-struct qm_dqrr_entry {
+struct __rte_aligned(8) qm_dqrr_entry {
u8 verb;
u8 stat;
u16 seqnum; /* 15-bit */
@@ -337,7 +337,7 @@ struct qm_dqrr_entry {
u8 __reserved2[3];
u32 fqid; /* 24-bit */
u32 contextB;
- struct qm_fd fd;
+ struct qm_fd fd; /* this has alignment 8 */
u8 __reserved4[32];
};
@@ -355,18 +355,19 @@ struct qm_dqrr_entry {
/* "ERN Message Response" */
/* "FQ State Change Notification" */
struct qm_mr_entry {
- u8 verb;
union {
struct {
+ u8 verb;
u8 dca;
u16 seqnum;
u8 rc; /* Rejection Code */
u32 orp:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed ern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) ern;
struct {
+ u8 verb;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
u8 colour:2; /* See QM_MR_DCERN_COLOUR_* */
u8 __reserved1:4;
@@ -381,18 +382,19 @@ struct qm_mr_entry {
u32 __reserved3:24;
u32 fqid; /* 24-bit */
u32 tag;
- struct qm_fd fd;
- } __packed dcern;
+ struct qm_fd fd; /* this has alignment 8 */
+ } __packed __rte_aligned(8) dcern;
struct {
+ u8 verb;
u8 fqs; /* Frame Queue Status */
u8 __reserved1[6];
u32 fqid; /* 24-bit */
u32 contextB;
u8 __reserved2[16];
- } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
+ } __packed __rte_aligned(8) fq; /* FQRN/FQRNI/FQRL/FQPN */
};
u8 __reserved2[32];
-} __packed;
+} __packed __rte_aligned(8);
#define QM_MR_VERB_VBIT 0x80
/*
* ERNs originating from direct-connect portals ("dcern") use 0x20 as a verb
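With the verb byte folded into each union member above, every view of qm_mr_entry starts with verb and keeps its 8-byte-aligned frame descriptor, which is why the qman.c hunks switch from msg->verb to msg->ern.verb. A tiny illustrative helper (not part of the patch) showing the new access pattern:

.. code-block:: c

    #include <fsl_qman.h>

    static inline uint8_t
    qm_mr_entry_verb(const struct qm_mr_entry *msg)
    {
        /* All union members now carry verb at the same offset; the ern
         * view is used by convention in the qman code. */
        return msg->ern.verb;
    }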
diff --git a/drivers/bus/pci/linux/pci.c b/drivers/bus/pci/linux/pci.c
index 44440f22..aabaa630 100644
--- a/drivers/bus/pci/linux/pci.c
+++ b/drivers/bus/pci/linux/pci.c
@@ -63,7 +63,8 @@
extern struct rte_pci_bus rte_pci_bus;
static int
-pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
+pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
+ size_t len)
{
int count;
char path[PATH_MAX];
@@ -84,7 +85,7 @@ pci_get_kernel_driver_by_path(const char *filename, char *dri_name)
name = strrchr(path, '/');
if (name) {
- strncpy(dri_name, name + 1, strlen(name + 1) + 1);
+ strlcpy(dri_name, name + 1, len);
return 0;
}
@@ -340,7 +341,7 @@ pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
/* parse driver */
snprintf(filename, sizeof(filename), "%s/driver", dirname);
- ret = pci_get_kernel_driver_by_path(filename, driver);
+ ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
if (ret < 0) {
RTE_LOG(ERR, EAL, "Fail to get kernel driver\n");
free(dev);
@@ -703,23 +704,22 @@ rte_pci_get_iommu_class(void)
int rte_pci_read_config(const struct rte_pci_device *device,
void *buf, size_t len, off_t offset)
{
+ char devname[RTE_DEV_NAME_MAX_LEN] = "";
const struct rte_intr_handle *intr_handle = &device->intr_handle;
- switch (intr_handle->type) {
- case RTE_INTR_HANDLE_UIO:
- case RTE_INTR_HANDLE_UIO_INTX:
+ switch (device->kdrv) {
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
return pci_uio_read_config(intr_handle, buf, len, offset);
-
#ifdef VFIO_PRESENT
- case RTE_INTR_HANDLE_VFIO_MSIX:
- case RTE_INTR_HANDLE_VFIO_MSI:
- case RTE_INTR_HANDLE_VFIO_LEGACY:
+ case RTE_KDRV_VFIO:
return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
default:
+ rte_pci_device_name(&device->addr, devname,
+ RTE_DEV_NAME_MAX_LEN);
RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ "Unknown driver type for %s\n", devname);
return -1;
}
}
@@ -728,23 +728,22 @@ int rte_pci_read_config(const struct rte_pci_device *device,
int rte_pci_write_config(const struct rte_pci_device *device,
const void *buf, size_t len, off_t offset)
{
+ char devname[RTE_DEV_NAME_MAX_LEN] = "";
const struct rte_intr_handle *intr_handle = &device->intr_handle;
- switch (intr_handle->type) {
- case RTE_INTR_HANDLE_UIO:
- case RTE_INTR_HANDLE_UIO_INTX:
+ switch (device->kdrv) {
+ case RTE_KDRV_IGB_UIO:
+ case RTE_KDRV_UIO_GENERIC:
return pci_uio_write_config(intr_handle, buf, len, offset);
-
#ifdef VFIO_PRESENT
- case RTE_INTR_HANDLE_VFIO_MSIX:
- case RTE_INTR_HANDLE_VFIO_MSI:
- case RTE_INTR_HANDLE_VFIO_LEGACY:
+ case RTE_KDRV_VFIO:
return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
default:
+ rte_pci_device_name(&device->addr, devname,
+ RTE_DEV_NAME_MAX_LEN);
RTE_LOG(ERR, EAL,
- "Unknown handle type of fd %d\n",
- intr_handle->fd);
+ "Unknown driver type for %s\n", devname);
return -1;
}
}
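rte_pci_read_config()/rte_pci_write_config() now dispatch on the bound kernel driver (device->kdrv) instead of the interrupt handle type, so config space access works even when no interrupt handler has been set up. A hedged usage sketch that reads the 16-bit vendor and device IDs from standard PCI config space (offsets 0x0 and 0x2):

.. code-block:: c

    #include <stdio.h>
    #include <rte_bus_pci.h>

    static void
    print_pci_ids(const struct rte_pci_device *dev)
    {
        uint16_t vendor, device;

        if (rte_pci_read_config(dev, &vendor, sizeof(vendor), 0x0) < 0 ||
            rte_pci_read_config(dev, &device, sizeof(device), 0x2) < 0)
            return;
        printf("%04x:%04x\n", vendor, device);
    }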
diff --git a/drivers/bus/pci/linux/pci_uio.c b/drivers/bus/pci/linux/pci_uio.c
index 92b7f027..39176ac7 100644
--- a/drivers/bus/pci/linux/pci_uio.c
+++ b/drivers/bus/pci/linux/pci_uio.c
@@ -328,7 +328,7 @@ pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
loc->function, res_idx);
/* allocate memory to keep path */
- maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
+ maps[map_idx].path = rte_malloc(NULL, sizeof(devname), 0);
if (maps[map_idx].path == NULL) {
RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
strerror(errno));
diff --git a/drivers/bus/pci/linux/pci_vfio.c b/drivers/bus/pci/linux/pci_vfio.c
index 745db260..65489728 100644
--- a/drivers/bus/pci/linux/pci_vfio.c
+++ b/drivers/bus/pci/linux/pci_vfio.c
@@ -580,11 +580,6 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
loc->domain, loc->bus, loc->devid, loc->function);
- ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
- &vfio_dev_fd, &device_info);
- if (ret)
- return ret;
-
/* if we're in a secondary process, just find our tailq entry */
TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
if (rte_pci_addr_cmp(&vfio_res->pci_addr,
@@ -596,9 +591,14 @@ pci_vfio_map_resource_secondary(struct rte_pci_device *dev)
if (vfio_res == NULL) {
RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
pci_addr);
- goto err_vfio_dev_fd;
+ return -1;
}
+ ret = rte_vfio_setup_device(rte_pci_get_sysfs_path(), pci_addr,
+ &vfio_dev_fd, &device_info);
+ if (ret)
+ return ret;
+
/* map BARs */
maps = vfio_res->maps;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 70043897..f5bb6aa9 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -663,22 +663,30 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint8_t digest_idx = qp->digest_idx;
do {
- /* Get next operation to process from ingress queue */
- retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
- if (retval < 0)
- break;
-
/* Get next free mb job struct from mb manager */
job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
- (nb_ops - processed_jobs) - 1);
+ nb_ops - processed_jobs);
+
+ if (nb_ops == processed_jobs)
+ break;
job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
}
+ /*
+ * Get next operation to process from ingress queue.
+ * There is no need to return the job to the MB_MGR
+ * if there are no more operations to process, since the MB_MGR
+ * can use that pointer again in next get_next calls.
+ */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
retval = set_mb_job_params(job, qp, op, &digest_idx);
if (unlikely(retval != 0)) {
qp->stats.dequeue_err_count++;
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
index 434cf850..69e1ccba 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
@@ -58,9 +58,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -78,9 +78,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -98,9 +98,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 12,
.max = 20,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -118,9 +118,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 20,
+ .min = 12,
.max = 20,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -138,9 +138,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 28,
+ .min = 12,
.max = 28,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -158,9 +158,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 12,
.max = 32,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -178,9 +178,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 32,
+ .min = 12,
.max = 32,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -198,9 +198,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 12,
.max = 48,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -218,9 +218,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 48,
+ .min = 12,
.max = 48,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -238,9 +238,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 64,
- .max = 64,
- .increment = 0
+ .min = 12,
+ .max = 48,
+ .increment = 4
},
}, }
}, }
@@ -258,8 +258,8 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 64,
- .max = 64,
+ .min = 12,
+ .max = 48,
.increment = 0
},
}, }
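The digest_size updates above advertise a range (min, max, increment) rather than a single value. A sketch of the generic range check an application can apply to such a capability field, assuming the usual DPDK semantics that increment == 0 advertises a single supported value:

.. code-block:: c

    #include <stdint.h>
    #include <rte_cryptodev.h>

    static int
    param_in_range(const struct rte_crypto_param_range *r, uint16_t len)
    {
        if (len < r->min || len > r->max)
            return 0;
        if (r->increment == 0)
            return len == r->min;
        return ((len - r->min) % r->increment) == 0;
    }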
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index fcba1197..07acd7b9 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -37,6 +37,7 @@
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>
+#include <rte_string_fns.h>
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
@@ -342,7 +343,7 @@ parse_name_arg(const char *key __rte_unused,
return -EINVAL;
}
- strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+ strlcpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
return 0;
}
diff --git a/drivers/event/sw/sw_evdev_scheduler.c b/drivers/event/sw/sw_evdev_scheduler.c
index 8a2c9d4f..2d25cf0b 100644
--- a/drivers/event/sw/sw_evdev_scheduler.c
+++ b/drivers/event/sw/sw_evdev_scheduler.c
@@ -79,9 +79,11 @@ sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
int cq = fid->cq;
if (cq < 0) {
- uint32_t cq_idx = qid->cq_next_tx++;
- if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+ uint32_t cq_idx;
+ if (qid->cq_next_tx >= qid->cq_num_mapped_cqs)
qid->cq_next_tx = 0;
+ cq_idx = qid->cq_next_tx++;
+
cq = qid->cq_map[cq_idx];
/* find least used */
@@ -168,9 +170,10 @@ sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
do {
if (++cq_check_count > qid->cq_num_mapped_cqs)
goto exit;
- cq = qid->cq_map[cq_idx];
- if (++cq_idx == qid->cq_num_mapped_cqs)
+ if (cq_idx >= qid->cq_num_mapped_cqs)
cq_idx = 0;
+ cq = qid->cq_map[cq_idx++];
+
} while (rte_event_ring_free_count(
sw->ports[cq].cq_worker_ring) == 0 ||
sw->ports[cq].inflights == SW_PORT_HIST_LIST);
@@ -248,7 +251,7 @@ sw_schedule_qid_to_cq(struct sw_evdev *sw)
int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
/* zero mapped CQs indicates directed */
- if (iq_num >= SW_IQS_MAX)
+ if (iq_num >= SW_IQS_MAX || qid->cq_num_mapped_cqs == 0)
continue;
uint32_t pkts_done = 0;
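The scheduler fix wraps the rotating CQ index before using it, so the round robin stays in bounds even if cq_num_mapped_cqs shrinks between calls (and QIDs with zero mapped CQs are now skipped entirely). The same wrap-before-use pattern in isolation, with illustrative names:

.. code-block:: c

    #include <stdint.h>

    /* Assumes count > 0, which the cq_num_mapped_cqs == 0 check guarantees. */
    static inline uint32_t
    rr_next(uint32_t *next, uint32_t count)
    {
        uint32_t idx;

        if (*next >= count)
            *next = 0;
        idx = *next;
        (*next)++;
        return idx;
    }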
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index e58684d6..42fa510f 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -34,6 +34,7 @@
#define BNX2X_PMD_VERSION_REVISION 5
#define BNX2X_PMD_VERSION_PATCH 1
+#ifdef RTE_LIBRTE_BNX2X_DEBUG
static inline const char *
bnx2x_pmd_version(void)
{
@@ -49,6 +50,7 @@ bnx2x_pmd_version(void)
return version;
}
+#endif
static z_stream zlib_stream;
@@ -114,6 +116,7 @@ static void bnx2x_pf_disable(struct bnx2x_softc *sc);
static void bnx2x_update_rx_prod(struct bnx2x_softc *sc,
struct bnx2x_fastpath *fp,
uint16_t rx_bd_prod, uint16_t rx_cq_prod);
+static void bnx2x_link_report_locked(struct bnx2x_softc *sc);
static void bnx2x_link_report(struct bnx2x_softc *sc);
void bnx2x_link_status_update(struct bnx2x_softc *sc);
static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
@@ -180,13 +183,14 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
SOCKET_ID_ANY,
0, align);
if (z == NULL) {
- PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
+ PMD_DRV_LOG(ERR, sc, "DMA alloc failed for %s", msg);
return -ENOMEM;
}
dma->paddr = (uint64_t) z->iova;
dma->vaddr = z->addr;
- PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
+ PMD_DRV_LOG(DEBUG, sc,
+ "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
return 0;
}
@@ -199,11 +203,16 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
uint32_t hw_lock_control_reg;
int cnt;
- PMD_INIT_FUNC_TRACE();
+#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if (resource)
+ PMD_INIT_FUNC_TRACE(sc);
+#else
+ PMD_INIT_FUNC_TRACE(sc);
+#endif
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
resource);
return -1;
@@ -219,7 +228,7 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
/* validate the resource is not already taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (lock_status & resource_bit) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"resource in use (status 0x%x bit 0x%x)",
lock_status, resource_bit);
return -1;
@@ -235,7 +244,8 @@ static int bnx2x_acquire_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
DELAY(5000);
}
- PMD_DRV_LOG(NOTICE, "Resource lock timeout!");
+ PMD_DRV_LOG(NOTICE, sc, "Resource 0x%x resource_bit 0x%x lock timeout!",
+ resource, resource_bit);
return -1;
}
@@ -246,13 +256,18 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
int func = SC_FUNC(sc);
uint32_t hw_lock_control_reg;
- PMD_INIT_FUNC_TRACE();
+#ifndef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
+ if (resource)
+ PMD_INIT_FUNC_TRACE(sc);
+#else
+ PMD_INIT_FUNC_TRACE(sc);
+#endif
/* validate the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- PMD_DRV_LOG(NOTICE,
- "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE",
- resource);
+ PMD_DRV_LOG(NOTICE, sc,
+ "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)"
+ " resource_bit 0x%x", resource, resource_bit);
return -1;
}
@@ -266,7 +281,7 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
/* validate the resource is currently taken */
lock_status = REG_RD(sc, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"resource not in use (status 0x%x bit 0x%x)",
lock_status, resource_bit);
return -1;
@@ -276,6 +291,18 @@ static int bnx2x_release_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
return 0;
}
+static void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc)
+{
+ BNX2X_PHY_LOCK(sc);
+ bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
+}
+
+static void bnx2x_release_phy_lock(struct bnx2x_softc *sc)
+{
+ bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_MDIO);
+ BNX2X_PHY_UNLOCK(sc);
+}
+
/* copy command into DMAE command memory and set DMAE command Go */
void bnx2x_post_dmae(struct bnx2x_softc *sc, struct dmae_command *dmae, int idx)
{
@@ -368,7 +395,7 @@ bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae)
if (!timeout ||
(sc->recovery_state != BNX2X_RECOVERY_DONE &&
sc->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
- PMD_DRV_LOG(INFO, "DMAE timeout!");
+ PMD_DRV_LOG(INFO, sc, "DMAE timeout!");
return DMAE_TIMEOUT;
}
@@ -377,7 +404,7 @@ bnx2x_issue_dmae_with_comp(struct bnx2x_softc *sc, struct dmae_command *dmae)
}
if (*wb_comp & DMAE_PCI_ERR_FLAG) {
- PMD_DRV_LOG(INFO, "DMAE PCI error!");
+ PMD_DRV_LOG(INFO, sc, "DMAE PCI error!");
return DMAE_PCI_ERROR;
}
@@ -536,7 +563,7 @@ void
elink_cb_event_log(__rte_unused struct bnx2x_softc *sc,
__rte_unused const elink_log_id_t elink_log_id, ...)
{
- PMD_DRV_LOG(DEBUG, "ELINK EVENT LOG (%d)", elink_log_id);
+ PMD_DRV_LOG(DEBUG, sc, "ELINK EVENT LOG (%d)", elink_log_id);
}
static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode)
@@ -545,7 +572,7 @@ static int bnx2x_set_spio(struct bnx2x_softc *sc, int spio, uint32_t mode)
/* Only 2 SPIOs are configurable */
if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
- PMD_DRV_LOG(NOTICE, "Invalid SPIO 0x%x", spio);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid SPIO 0x%x", spio);
return -1;
}
@@ -595,7 +622,7 @@ static int bnx2x_gpio_read(struct bnx2x_softc *sc, int gpio_num, uint8_t port)
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num);
return -1;
}
@@ -620,7 +647,7 @@ bnx2x_gpio_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode, uint8_t po
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num);
return -1;
}
@@ -689,7 +716,8 @@ bnx2x_gpio_mult_write(struct bnx2x_softc *sc, uint8_t pins, uint32_t mode)
break;
default:
- PMD_DRV_LOG(NOTICE, "Invalid GPIO mode assignment %d", mode);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Invalid GPIO mode assignment %d", mode);
bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO);
return -1;
}
@@ -715,7 +743,7 @@ bnx2x_gpio_int_write(struct bnx2x_softc *sc, int gpio_num, uint32_t mode,
uint32_t gpio_reg;
if (gpio_num > MISC_REGISTERS_GPIO_3) {
- PMD_DRV_LOG(NOTICE, "Invalid GPIO %d", gpio_num);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid GPIO %d", gpio_num);
return -1;
}
@@ -792,7 +820,7 @@ elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
SHMEM_WR(sc, func_mb[mb_idx].drv_mb_param, param);
SHMEM_WR(sc, func_mb[mb_idx].drv_mb_header, (command | seq));
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"wrote command 0x%08x to FW MB param 0x%08x",
(command | seq), param);
@@ -807,7 +835,7 @@ elink_cb_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
rc &= FW_MSG_CODE_MASK;
} else {
/* Ruh-roh! */
- PMD_DRV_LOG(NOTICE, "FW failed to respond!");
+ PMD_DRV_LOG(NOTICE, sc, "FW failed to respond!");
rc = 0;
}
@@ -1025,12 +1053,12 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
if (common) {
if (!atomic_load_acq_long(&sc->eq_spq_left)) {
- PMD_DRV_LOG(INFO, "EQ ring is full!");
+ PMD_DRV_LOG(INFO, sc, "EQ ring is full!");
return -1;
}
} else {
if (!atomic_load_acq_long(&sc->cq_spq_left)) {
- PMD_DRV_LOG(INFO, "SPQ ring is full!");
+ PMD_DRV_LOG(INFO, sc, "SPQ ring is full!");
return -1;
}
}
@@ -1063,7 +1091,7 @@ bnx2x_sp_post(struct bnx2x_softc *sc, int command, int cid, uint32_t data_hi,
atomic_subtract_acq_long(&sc->cq_spq_left, 1);
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x"
"data (%x:%x) type(0x%x) left (CQ, EQ) (%lx,%lx)",
sc->spq_prod_idx,
@@ -1136,44 +1164,45 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
enum ecore_queue_cmd drv_cmd = ECORE_Q_CMD_MAX;
struct ecore_queue_sp_obj *q_obj = &BNX2X_SP_OBJ(sc, fp).q_obj;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"fp=%d cid=%d got ramrod #%d state is %x type is %d",
fp->index, cid, command, sc->state,
rr_cqe->ramrod_cqe.ramrod_type);
switch (command) {
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
- PMD_DRV_LOG(DEBUG, "got UPDATE ramrod. CID %d", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got UPDATE ramrod. CID %d", cid);
drv_cmd = ECORE_Q_CMD_UPDATE;
break;
case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] setup ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] setup ramrod", cid);
drv_cmd = ECORE_Q_CMD_SETUP;
break;
case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] tx-only setup ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc,
+ "got MULTI[%d] tx-only setup ramrod", cid);
drv_cmd = ECORE_Q_CMD_SETUP_TX_ONLY;
break;
case (RAMROD_CMD_ID_ETH_HALT):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] halt ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] halt ramrod", cid);
drv_cmd = ECORE_Q_CMD_HALT;
break;
case (RAMROD_CMD_ID_ETH_TERMINATE):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] teminate ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid);
drv_cmd = ECORE_Q_CMD_TERMINATE;
break;
case (RAMROD_CMD_ID_ETH_EMPTY):
- PMD_DRV_LOG(DEBUG, "got MULTI[%d] empty ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] empty ramrod", cid);
drv_cmd = ECORE_Q_CMD_EMPTY;
break;
default:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"ERROR: unexpected MC reply (%d)"
"on fp[%d]", command, fp->index);
return;
@@ -1195,7 +1224,7 @@ bnx2x_sp_event(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
atomic_add_acq_long(&sc->cq_spq_left, 1);
- PMD_DRV_LOG(DEBUG, "sc->cq_spq_left 0x%lx",
+ PMD_DRV_LOG(DEBUG, sc, "sc->cq_spq_left 0x%lx",
atomic_load_acq_long(&sc->cq_spq_left));
}
@@ -1391,7 +1420,7 @@ bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj,
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc < 0)
- PMD_DRV_LOG(ERR, "Failed to delete MACs (%d)", rc);
+ PMD_DRV_LOG(ERR, sc, "Failed to delete MACs (%d)", rc);
return rc;
}
@@ -1542,13 +1571,13 @@ static int bnx2x_nic_load_no_mcp(struct bnx2x_softc *sc)
int path = SC_PATH(sc);
int port = SC_PORT(sc);
- PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]++;
load_count[path][1 + port]++;
- PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 1)
@@ -1565,12 +1594,12 @@ static int bnx2x_nic_unload_no_mcp(struct bnx2x_softc *sc)
int port = SC_PORT(sc);
int path = SC_PATH(sc);
- PMD_DRV_LOG(INFO, "NO MCP - load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]--;
load_count[path][1 + port]--;
- PMD_DRV_LOG(INFO, "NO MCP - new load counts[%d] %d, %d, %d",
+ PMD_DRV_LOG(INFO, sc, "NO MCP - new load counts[%d] %d, %d, %d",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 0) {
@@ -1650,7 +1679,7 @@ static int bnx2x_func_wait_started(struct bnx2x_softc *sc)
*/
struct ecore_func_state_params func_params = { NULL };
- PMD_DRV_LOG(NOTICE, "Unexpected function state! "
+ PMD_DRV_LOG(NOTICE, sc, "Unexpected function state! "
"Forcing STARTED-->TX_STOPPED-->STARTED");
func_params.f_obj = &sc->func_obj;
@@ -1674,7 +1703,7 @@ static int bnx2x_stop_queue(struct bnx2x_softc *sc, int index)
struct ecore_queue_state_params q_params = { NULL };
int rc;
- PMD_DRV_LOG(DEBUG, "stopping queue %d cid %d", index, fp->index);
+ PMD_DRV_LOG(DEBUG, sc, "stopping queue %d cid %d", index, fp->index);
q_params.q_obj = &sc->sp_objs[fp->index].q_obj;
/* We want to wait for completion in this context */
@@ -1725,7 +1754,7 @@ static uint8_t bnx2x_wait_sp_comp(struct bnx2x_softc *sc, unsigned long mask)
tmp = atomic_load_acq_long(&sc->sp_state);
if (tmp & mask) {
- PMD_DRV_LOG(INFO, "Filtering completion timed out: "
+ PMD_DRV_LOG(INFO, sc, "Filtering completion timed out: "
"sp_state 0x%lx, mask 0x%lx", tmp, mask);
return FALSE;
}
@@ -1751,7 +1780,7 @@ static int bnx2x_func_stop(struct bnx2x_softc *sc)
*/
rc = ecore_func_state_change(sc, &func_params);
if (rc) {
- PMD_DRV_LOG(NOTICE, "FUNC_STOP ramrod failed. "
+ PMD_DRV_LOG(NOTICE, sc, "FUNC_STOP ramrod failed. "
"Running a dry transaction");
bnx2x_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
return ecore_func_state_change(sc, &func_params);
@@ -1800,14 +1829,16 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_ETH_MAC,
FALSE);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE, "Failed to delete all ETH MACs (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to delete all ETH MACs (%d)", rc);
}
/* Clean up UC list */
rc = bnx2x_del_all_macs(sc, &sc->sp_objs[0].mac_obj, ECORE_UC_LIST_MAC,
TRUE);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE, "Failed to delete UC MACs list (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to delete UC MACs list (%d)", rc);
}
/* Disable LLH */
@@ -1830,7 +1861,7 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
rparam.mcast_obj = &sc->mcast_obj;
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Failed to send DEL MCAST command (%d)", rc);
}
@@ -1847,7 +1878,7 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
*/
rc = bnx2x_func_wait_started(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "bnx2x_func_wait_started failed");
+ PMD_DRV_LOG(NOTICE, sc, "bnx2x_func_wait_started failed");
}
/*
@@ -1865,14 +1896,14 @@ bnx2x_chip_cleanup(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_li
* very wrong has happen.
*/
if (!bnx2x_wait_sp_comp(sc, ~0x0UL)) {
- PMD_DRV_LOG(NOTICE, "Common slow path ramrods got stuck!");
+ PMD_DRV_LOG(NOTICE, sc, "Common slow path ramrods got stuck!");
}
unload_error:
rc = bnx2x_func_stop(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Function stop failed!");
+ PMD_DRV_LOG(NOTICE, sc, "Function stop failed!");
}
/* disable HW interrupts */
@@ -1881,7 +1912,7 @@ unload_error:
/* Reset the chip */
rc = bnx2x_reset_hw(sc, reset_code);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Hardware reset failed");
+ PMD_DRV_LOG(NOTICE, sc, "Hardware reset failed");
}
/* Report UNLOAD_DONE to MCP */
@@ -1892,7 +1923,7 @@ static void bnx2x_disable_close_the_gate(struct bnx2x_softc *sc)
{
uint32_t val;
- PMD_DRV_LOG(DEBUG, "Disabling 'close the gates'");
+ PMD_DRV_LOG(DEBUG, sc, "Disabling 'close the gates'");
val = REG_RD(sc, MISC_REG_AEU_GENERAL_MASK);
val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
@@ -1923,7 +1954,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
rc = mac_obj->delete_all(sc, &sc->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0) {
- PMD_DRV_LOG(NOTICE, "Failed to clean ETH MACs (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc, "Failed to clean ETH MACs (%d)", rc);
}
/* Cleanup UC list */
@@ -1931,7 +1962,8 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
bnx2x_set_bit(ECORE_UC_LIST_MAC, &vlan_mac_flags);
rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags);
if (rc != 0) {
- PMD_DRV_LOG(NOTICE, "Failed to clean UC list MACs (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to clean UC list MACs (%d)", rc);
}
/* Now clean mcast object... */
@@ -1942,7 +1974,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
/* Add a DEL command... */
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_DEL);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Failed to send DEL MCAST command (%d)", rc);
}
@@ -1951,7 +1983,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
while (rc != 0) {
if (rc < 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Failed to clean MCAST object (%d)", rc);
return;
}
@@ -1968,7 +2000,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
uint8_t global = FALSE;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
+ PMD_DRV_LOG(DEBUG, sc, "Starting NIC unload...");
/* mark driver as unloaded in shmem2 */
if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
@@ -1992,7 +2024,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
bnx2x_release_leader_lock(sc);
mb();
- PMD_DRV_LOG(NOTICE, "Can't unload in closed or error state");
+ PMD_DRV_LOG(NOTICE, sc, "Can't unload in closed or error state");
return -1;
}
@@ -2097,7 +2129,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
bnx2x_disable_close_the_gate(sc);
}
- PMD_DRV_LOG(DEBUG, "Ended NIC unload");
+ PMD_DRV_LOG(DEBUG, sc, "Ended NIC unload");
return 0;
}
@@ -2245,7 +2277,7 @@ static void bnx2x_ilt_set_info(struct bnx2x_softc *sc)
struct ecore_ilt *ilt = sc->ilt;
uint16_t line = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
ilt->start_line = FUNC_ILT_BASE(SC_FUNC(sc));
@@ -2399,7 +2431,7 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc)
bnx2x_alloc_ilt_lines_mem(sc);
if (ecore_ilt_mem_op(sc, ILT_MEMOP_ALLOC)) {
- PMD_DRV_LOG(NOTICE, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed");
+ PMD_DRV_LOG(NOTICE, sc, "ecore_ilt_mem_op ILT_MEMOP_ALLOC failed");
bnx2x_free_mem(sc);
return -1;
}
@@ -2602,7 +2634,7 @@ static void bnx2x_set_pf_load(struct bnx2x_softc *sc)
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RECOVERY_REG);
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
val = REG_RD(sc, BNX2X_RECOVERY_GLOB_REG);
@@ -2655,14 +2687,14 @@ static uint8_t bnx2x_clear_pf_load(struct bnx2x_softc *sc)
/* send load requrest to mcp and analyze response */
static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
{
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* init fw_seq */
sc->fw_seq =
(SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
- PMD_DRV_LOG(DEBUG, "initial fw_seq 0x%04x", sc->fw_seq);
+ PMD_DRV_LOG(DEBUG, sc, "initial fw_seq 0x%04x", sc->fw_seq);
#ifdef BNX2X_PULSE
/* get the current FW pulse sequence */
@@ -2681,13 +2713,13 @@ static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
/* if the MCP fails to respond we must abort */
if (!(*load_code)) {
- PMD_DRV_LOG(NOTICE, "MCP response failure!");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure!");
return -1;
}
/* if MCP refused then must abort */
if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
- PMD_DRV_LOG(NOTICE, "MCP refused load request");
+ PMD_DRV_LOG(NOTICE, sc, "MCP refused load request");
return -1;
}
@@ -2714,12 +2746,12 @@ static int bnx2x_nic_load_analyze_req(struct bnx2x_softc *sc, uint32_t load_code
/* read loaded FW from chip */
loaded_fw = REG_RD(sc, XSEM_REG_PRAM);
- PMD_DRV_LOG(DEBUG, "loaded FW 0x%08x / my FW 0x%08x",
+ PMD_DRV_LOG(DEBUG, sc, "loaded FW 0x%08x / my FW 0x%08x",
loaded_fw, my_fw);
/* abort nic load if version mismatch */
if (my_fw != loaded_fw) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"FW 0x%08x already loaded (mine is 0x%08x)",
loaded_fw, my_fw);
return -1;
@@ -2734,7 +2766,7 @@ static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code)
{
uint32_t ncsi_oem_data_addr;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
(load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
@@ -2749,7 +2781,7 @@ static void bnx2x_nic_load_pmf(struct bnx2x_softc *sc, uint32_t load_code)
sc->port.pmf = 0;
}
- PMD_DRV_LOG(DEBUG, "pmf %d", sc->port.pmf);
+ PMD_DRV_LOG(DEBUG, sc, "pmf %d", sc->port.pmf);
if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) {
if (SHMEM2_HAS(sc, ncsi_oem_data_addr)) {
@@ -2792,10 +2824,10 @@ static void bnx2x_read_mf_cfg(struct bnx2x_softc *sc)
if (sc->devinfo.mf_info.mf_config[SC_VN(sc)] &
FUNC_MF_CFG_FUNC_DISABLED) {
- PMD_DRV_LOG(DEBUG, "mf_cfg function disabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled");
sc->flags |= BNX2X_MF_FUNC_DIS;
} else {
- PMD_DRV_LOG(DEBUG, "mf_cfg function enabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled");
sc->flags &= ~BNX2X_MF_FUNC_DIS;
}
}
@@ -2816,7 +2848,7 @@ static int bnx2x_acquire_alr(struct bnx2x_softc *sc)
}
if (!(val & (1L << 31))) {
- PMD_DRV_LOG(NOTICE, "Cannot acquire MCP access lock register");
+ PMD_DRV_LOG(NOTICE, sc, "Cannot acquire MCP access lock register");
return -1;
}
@@ -2844,7 +2876,7 @@ static void bnx2x_fan_failure(struct bnx2x_softc *sc)
ext_phy_config);
/* log the failure */
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Fan Failure has caused the driver to shutdown "
"the card to prevent permanent damage. "
"Please contact OEM Support for assistance");
@@ -2901,7 +2933,7 @@ static void bnx2x_link_attn(struct bnx2x_softc *sc)
}
}
- bnx2x_link_report(sc);
+ bnx2x_link_report_locked(sc);
if (IS_MF(sc)) {
bnx2x_link_sync_notify(sc);
@@ -2922,7 +2954,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
uint32_t cnt;
if (sc->attn_state & asserted) {
- PMD_DRV_LOG(ERR, "IGU ERROR attn=0x%08x", asserted);
+ PMD_DRV_LOG(ERR, sc, "IGU ERROR attn=0x%08x", asserted);
}
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
@@ -2940,6 +2972,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
if (asserted & ATTN_HARD_WIRED_MASK) {
if (asserted & ATTN_NIG_FOR_FUNC) {
+ bnx2x_acquire_phy_lock(sc);
/* save nig interrupt mask */
nig_mask = REG_RD(sc, nig_int_mask_addr);
@@ -2954,45 +2987,45 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
}
if (asserted & ATTN_SW_TIMER_4_FUNC) {
- PMD_DRV_LOG(DEBUG, "ATTN_SW_TIMER_4_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_SW_TIMER_4_FUNC!");
}
if (asserted & GPIO_2_FUNC) {
- PMD_DRV_LOG(DEBUG, "GPIO_2_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "GPIO_2_FUNC!");
}
if (asserted & GPIO_3_FUNC) {
- PMD_DRV_LOG(DEBUG, "GPIO_3_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "GPIO_3_FUNC!");
}
if (asserted & GPIO_4_FUNC) {
- PMD_DRV_LOG(DEBUG, "GPIO_4_FUNC!");
+ PMD_DRV_LOG(DEBUG, sc, "GPIO_4_FUNC!");
}
if (port == 0) {
if (asserted & ATTN_GENERAL_ATTN_1) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_1!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_1!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_2) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_2!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_2!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_3) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_3!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_3!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
}
} else {
if (asserted & ATTN_GENERAL_ATTN_4) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_4!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_4!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_5) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_5!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_5!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
}
if (asserted & ATTN_GENERAL_ATTN_6) {
- PMD_DRV_LOG(DEBUG, "ATTN_GENERAL_ATTN_6!");
+ PMD_DRV_LOG(DEBUG, sc, "ATTN_GENERAL_ATTN_6!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
}
}
@@ -3006,7 +3039,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER * 8);
}
- PMD_DRV_LOG(DEBUG, "about to mask 0x%08x at %s addr 0x%08x",
+ PMD_DRV_LOG(DEBUG, sc, "about to mask 0x%08x at %s addr 0x%08x",
asserted,
(sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
reg_addr);
@@ -3028,7 +3061,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
&& (++cnt < MAX_IGU_ATTN_ACK_TO));
if (!igu_acked) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to verify IGU ack on time");
}
@@ -3037,6 +3070,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x_softc *sc, uint32_t asserted)
REG_WR(sc, nig_int_mask_addr, nig_mask);
+ bnx2x_release_phy_lock(sc);
}
}
@@ -3044,7 +3078,7 @@ static void
bnx2x_print_next_block(__rte_unused struct bnx2x_softc *sc, __rte_unused int idx,
__rte_unused const char *blk)
{
- PMD_DRV_LOG(INFO, "%s%s", idx ? ", " : "", blk);
+ PMD_DRV_LOG(INFO, sc, "%s%s", idx ? ", " : "", blk);
}
static int
@@ -3352,7 +3386,7 @@ bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
(sig[2] & HW_PRTY_ASSERT_SET_2) ||
(sig[3] & HW_PRTY_ASSERT_SET_3) ||
(sig[4] & HW_PRTY_ASSERT_SET_4)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Parity error: HW block parity attention:"
"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x",
(uint32_t) (sig[0] & HW_PRTY_ASSERT_SET_0),
@@ -3362,7 +3396,7 @@ bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
(uint32_t) (sig[4] & HW_PRTY_ASSERT_SET_4));
if (print)
- PMD_DRV_LOG(INFO, "Parity errors detected in blocks: ");
+ PMD_DRV_LOG(INFO, sc, "Parity errors detected in blocks: ");
par_num =
bnx2x_check_blocks_with_parity0(sc, sig[0] &
@@ -3386,7 +3420,7 @@ bnx2x_parity_attn(struct bnx2x_softc *sc, uint8_t * global, uint8_t print,
par_num, print);
if (print)
- PMD_DRV_LOG(INFO, "");
+ PMD_DRV_LOG(INFO, sc, "");
return TRUE;
}
@@ -3418,64 +3452,64 @@ static void bnx2x_attn_int_deasserted4(struct bnx2x_softc *sc, uint32_t attn)
if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
val = REG_RD(sc, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
- PMD_DRV_LOG(INFO, "ERROR: PGLUE hw attention 0x%08x", val);
+ PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW");
}
if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
val = REG_RD(sc, ATC_REG_ATC_INT_STS_CLR);
- PMD_DRV_LOG(INFO, "ERROR: ATC hw attention 0x%08x", val);
+ PMD_DRV_LOG(INFO, sc, "ERROR: ATC hw attention 0x%08x", val);
if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ADDRESS_ERROR");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND");
if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS");
if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR");
if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU");
}
if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"ERROR: FATAL parity attention set4 0x%08x",
(uint32_t) (attn &
(AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR
@@ -3598,11 +3632,11 @@ static void bnx2x_dcc_event(struct bnx2x_softc *sc, uint32_t dcc_event)
*/
if (sc->devinfo.
mf_info.mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_DISABLED) {
- PMD_DRV_LOG(DEBUG, "mf_cfg function disabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function disabled");
sc->flags |= BNX2X_MF_FUNC_DIS;
bnx2x_e1h_disable(sc);
} else {
- PMD_DRV_LOG(DEBUG, "mf_cfg function enabled");
+ PMD_DRV_LOG(DEBUG, sc, "mf_cfg function enabled");
sc->flags &= ~BNX2X_MF_FUNC_DIS;
bnx2x_e1h_enable(sc);
}
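Note on the change that repeats through the hunks above and below: PMD_DRV_LOG() and PMD_DEBUG_PERIODIC_LOG() now take the adapter context sc as an extra argument, presumably so every message can be tagged with the device it belongs to when several bnx2x ports run in one process. The real macro definition is in drivers/net/bnx2x/bnx2x_logs.h and is not shown in these hunks; the sketch below only illustrates the idea of a per-device log macro, and the struct/field names it uses are assumptions, not the driver's actual code.

/* Minimal sketch of a per-device log macro taking the softc; the struct,
 * the name field and the output format are assumptions for illustration. */
#include <stdio.h>

struct bnx2x_softc_sketch { const char *name; };

#define PMD_DRV_LOG(level, sc, fmt, ...) \
	fprintf(stderr, #level " bnx2x(%s): " fmt "\n", \
		(sc)->name, ##__VA_ARGS__)

static void attn_example(struct bnx2x_softc_sketch *sc, unsigned int val)
{
	/* mirrors the call sites above: level, softc, format, arguments */
	PMD_DRV_LOG(INFO, sc, "ERROR: PGLUE hw attention 0x%08x", val);
}

int main(void)
{
	struct bnx2x_softc_sketch sc = { .name = "0000:03:00.0" };
	attn_example(&sc, 0x12345678u);
	return 0;
}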
@@ -3657,7 +3691,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_XSTRORM_INTMEM + XSTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx)
- PMD_DRV_LOG(ERR, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "XSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
/* print the asserts */
for (i = 0; i < STORM_ASSERT_ARRAY_SIZE; i++) {
@@ -3679,7 +3713,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3692,7 +3726,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_TSTRORM_INTMEM + TSTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx) {
- PMD_DRV_LOG(ERR, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "TSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
}
/* print the asserts */
@@ -3715,7 +3749,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3728,7 +3762,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_CSTRORM_INTMEM + CSTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx) {
- PMD_DRV_LOG(ERR, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "CSTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
}
/* print the asserts */
@@ -3751,7 +3785,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3764,7 +3798,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
last_idx =
REG_RD8(sc, BAR_USTRORM_INTMEM + USTORM_ASSERT_LIST_INDEX_OFFSET);
if (last_idx) {
- PMD_DRV_LOG(ERR, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
+ PMD_DRV_LOG(ERR, sc, "USTORM_ASSERT_LIST_INDEX 0x%x", last_idx);
}
/* print the asserts */
@@ -3787,7 +3821,7 @@ static int bnx2x_mc_assert(struct bnx2x_softc *sc)
12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x",
i, row3, row2, row1, row0);
rc++;
@@ -3836,8 +3870,10 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
if (sc->link_vars.periodic_flags &
ELINK_PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
+ bnx2x_acquire_phy_lock(sc);
sc->link_vars.periodic_flags &=
~ELINK_PERIODIC_FLAGS_LINK_EVENT;
+ bnx2x_release_phy_lock(sc);
if (IS_MF(sc)) {
bnx2x_link_sync_notify(sc);
}
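The acquire/release pair added here, and in the later hunks that wrap elink_handle_module_detect_int(), elink_phy_init(), elink_period_func() and elink_common_init_phy(), serializes access to the PHY/elink state. The helpers themselves are defined elsewhere in the driver and are not part of these hunks; a minimal sketch of what such a pair could look like, assuming a plain mutex kept in the softc (the phy_mtx field and the use of pthread are assumptions, not the driver's actual implementation):

/* Sketch only: serialize elink/PHY access with a mutex owned by the softc. */
#include <pthread.h>

struct softc_sketch { pthread_mutex_t phy_mtx; };

static void acquire_phy_lock_sketch(struct softc_sketch *sc)
{
	pthread_mutex_lock(&sc->phy_mtx);	/* block concurrent elink calls */
}

static void release_phy_lock_sketch(struct softc_sketch *sc)
{
	pthread_mutex_unlock(&sc->phy_mtx);
}

int main(void)
{
	struct softc_sketch sc;
	pthread_mutex_init(&sc.phy_mtx, NULL);
	acquire_phy_lock_sketch(&sc);
	/* ... elink_*() work would run here under the lock ... */
	release_phy_lock_sketch(&sc);
	pthread_mutex_destroy(&sc.phy_mtx);
	return 0;
}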
@@ -3852,7 +3888,7 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
} else if (attn & BNX2X_MC_ASSERT_BITS) {
- PMD_DRV_LOG(ERR, "MC assert!");
+ PMD_DRV_LOG(ERR, sc, "MC assert!");
bnx2x_mc_assert(sc);
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_10, 0);
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_9, 0);
@@ -3862,24 +3898,24 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x_softc *sc, uint32_t attn)
} else if (attn & BNX2X_MCP_ASSERT) {
- PMD_DRV_LOG(ERR, "MCP assert!");
+ PMD_DRV_LOG(ERR, sc, "MCP assert!");
REG_WR(sc, MISC_REG_AEU_GENERAL_ATTN_11, 0);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Unknown HW assert! (attn 0x%08x)", attn);
}
}
if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
- PMD_DRV_LOG(ERR, "LATCHED attention 0x%08x (masked)", attn);
+ PMD_DRV_LOG(ERR, sc, "LATCHED attention 0x%08x (masked)", attn);
if (attn & BNX2X_GRC_TIMEOUT) {
val = REG_RD(sc, MISC_REG_GRC_TIMEOUT_ATTN);
- PMD_DRV_LOG(ERR, "GRC time-out 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "GRC time-out 0x%08x", val);
}
if (attn & BNX2X_GRC_RSV) {
val = REG_RD(sc, MISC_REG_GRC_RSV_ATTN);
- PMD_DRV_LOG(ERR, "GRC reserved 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "GRC reserved 0x%08x", val);
}
REG_WR(sc, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
}
@@ -3894,24 +3930,24 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
val = REG_RD(sc, CFC_REG_CFC_INT_STS_CLR);
- PMD_DRV_LOG(ERR, "CFC hw attention 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "CFC hw attention 0x%08x", val);
/* CFC error attention */
if (val & 0x2) {
- PMD_DRV_LOG(ERR, "FATAL error from CFC");
+ PMD_DRV_LOG(ERR, sc, "FATAL error from CFC");
}
}
if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_0);
- PMD_DRV_LOG(ERR, "PXP hw attention-0 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "PXP hw attention-0 0x%08x", val);
/* RQ_USDMDP_FIFO_OVERFLOW */
if (val & 0x18000) {
- PMD_DRV_LOG(ERR, "FATAL error from PXP");
+ PMD_DRV_LOG(ERR, sc, "FATAL error from PXP");
}
if (!CHIP_IS_E1x(sc)) {
val = REG_RD(sc, PXP_REG_PXP_INT_STS_CLR_1);
- PMD_DRV_LOG(ERR, "PXP hw attention-1 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "PXP hw attention-1 0x%08x", val);
}
}
#define PXP2_EOP_ERROR_BIT PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR
@@ -3939,7 +3975,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_CLR_0);
/* print the register, since no one can restore it */
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"PXP2_REG_PXP2_INT_STS_CLR_0 0x%08x", val0);
/*
@@ -3947,7 +3983,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
* then notify
*/
if (val0 & PXP2_EOP_ERROR_BIT) {
- PMD_DRV_LOG(ERR, "PXP2_WR_PGLUE_EOP_ERROR");
+ PMD_DRV_LOG(ERR, sc, "PXP2_WR_PGLUE_EOP_ERROR");
/*
* if only PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR is
@@ -3968,7 +4004,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
REG_WR(sc, reg_offset, val);
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"FATAL HW block attention set2 0x%x",
(uint32_t) (attn & HW_INTERRUT_ASSERT_SET_2));
rte_panic("HW block attention set2");
@@ -3983,10 +4019,10 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)
if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
val = REG_RD(sc, DORQ_REG_DORQ_INT_STS_CLR);
- PMD_DRV_LOG(ERR, "DB hw attention 0x%08x", val);
+ PMD_DRV_LOG(ERR, sc, "DB hw attention 0x%08x", val);
/* DORQ discard attention */
if (val & 0x2) {
- PMD_DRV_LOG(ERR, "FATAL error from DORQ");
+ PMD_DRV_LOG(ERR, sc, "FATAL error from DORQ");
}
}
@@ -3998,7 +4034,7 @@ static void bnx2x_attn_int_deasserted1(struct bnx2x_softc *sc, uint32_t attn)
val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
REG_WR(sc, reg_offset, val);
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"FATAL HW block attention set1 0x%08x",
(uint32_t) (attn & HW_INTERRUT_ASSERT_SET_1));
rte_panic("HW block attention set1");
@@ -4019,7 +4055,7 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)
val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
REG_WR(sc, reg_offset, val);
- PMD_DRV_LOG(WARNING, "SPIO5 hw attention");
+ PMD_DRV_LOG(WARNING, sc, "SPIO5 hw attention");
/* Fan failure attention */
elink_hw_reset_phy(&sc->link_params);
@@ -4027,7 +4063,9 @@ static void bnx2x_attn_int_deasserted0(struct bnx2x_softc *sc, uint32_t attn)
}
if ((attn & sc->link_vars.aeu_int_mask) && sc->port.pmf) {
+ bnx2x_acquire_phy_lock(sc);
elink_handle_module_detect_int(&sc->link_params);
+ bnx2x_release_phy_lock(sc);
}
if (attn & HW_INTERRUT_ASSERT_SET_0) {
@@ -4109,14 +4147,14 @@ static void bnx2x_attn_int_deasserted(struct bnx2x_softc *sc, uint32_t deasserte
}
val = ~deasserted;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"about to mask 0x%08x at %s addr 0x%08x", val,
(sc->devinfo.int_block == INT_BLOCK_HC) ? "HC" : "IGU",
reg_addr);
REG_WR(sc, reg_addr, val);
if (~sc->attn_state & deasserted) {
- PMD_DRV_LOG(ERR, "IGU error");
+ PMD_DRV_LOG(ERR, sc, "IGU error");
}
reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
@@ -4146,12 +4184,12 @@ static void bnx2x_attn_int(struct bnx2x_softc *sc)
uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
uint32_t deasserted = ~attn_bits & attn_ack & attn_state;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"attn_bits 0x%08x attn_ack 0x%08x asserted 0x%08x deasserted 0x%08x",
attn_bits, attn_ack, asserted, deasserted);
if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state)) {
- PMD_DRV_LOG(ERR, "BAD attention state");
+ PMD_DRV_LOG(ERR, sc, "BAD attention state");
}
/* handle bits that were raised */
@@ -4208,7 +4246,7 @@ static void bnx2x_handle_mcast_eqe(struct bnx2x_softc *sc)
if (sc->mcast_obj.check_pending(&sc->mcast_obj)) {
rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT);
if (rc < 0) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Failed to send pending mcast commands (%d)",
rc);
}
@@ -4228,17 +4266,17 @@ bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *e
switch (le32toh(elem->message.data.eth_event.echo) >> BNX2X_SWCID_SHIFT) {
case ECORE_FILTER_MAC_PENDING:
- PMD_DRV_LOG(DEBUG, "Got SETUP_MAC completions");
+ PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MAC completions");
vlan_mac_obj = &sc->sp_objs[cid].mac_obj;
break;
case ECORE_FILTER_MCAST_PENDING:
- PMD_DRV_LOG(DEBUG, "Got SETUP_MCAST completions");
+ PMD_DRV_LOG(DEBUG, sc, "Got SETUP_MCAST completions");
bnx2x_handle_mcast_eqe(sc);
return;
default:
- PMD_DRV_LOG(NOTICE, "Unsupported classification command: %d",
+ PMD_DRV_LOG(NOTICE, sc, "Unsupported classification command: %d",
elem->message.data.eth_event.echo);
return;
}
@@ -4246,9 +4284,10 @@ bnx2x_handle_classification_eqe(struct bnx2x_softc *sc, union event_ring_elem *e
rc = vlan_mac_obj->complete(sc, vlan_mac_obj, elem, &ramrod_flags);
if (rc < 0) {
- PMD_DRV_LOG(NOTICE, "Failed to schedule new commands (%d)", rc);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Failed to schedule new commands (%d)", rc);
} else if (rc > 0) {
- PMD_DRV_LOG(DEBUG, "Scheduled next pending commands...");
+ PMD_DRV_LOG(DEBUG, sc, "Scheduled next pending commands...");
}
}
@@ -4312,7 +4351,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_STAT_QUERY:
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "got statistics completion event %d",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "got statistics completion event %d",
sc->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -4320,7 +4359,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case EVENT_RING_OPCODE_CFC_DEL:
/* handle according to cid range */
/* we may want to verify here that the sc state is HALTING */
- PMD_DRV_LOG(DEBUG, "got delete ramrod for MULTI[%d]",
+ PMD_DRV_LOG(DEBUG, sc, "got delete ramrod for MULTI[%d]",
cid);
q_obj = bnx2x_cid_to_q_obj(sc, cid);
if (q_obj->complete_cmd(sc, q_obj, ECORE_Q_CMD_CFC_DEL)) {
@@ -4329,14 +4368,14 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
- PMD_DRV_LOG(DEBUG, "got STOP TRAFFIC");
+ PMD_DRV_LOG(DEBUG, sc, "got STOP TRAFFIC");
if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_TX_STOP)) {
break;
}
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
- PMD_DRV_LOG(DEBUG, "got START TRAFFIC");
+ PMD_DRV_LOG(DEBUG, sc, "got START TRAFFIC");
if (f_obj->complete_cmd
(sc, f_obj, ECORE_F_CMD_TX_START)) {
break;
@@ -4346,7 +4385,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case EVENT_RING_OPCODE_FUNCTION_UPDATE:
echo = elem->message.data.function_update_event.echo;
if (echo == SWITCH_UPDATE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got FUNC_SWITCH_UPDATE ramrod");
if (f_obj->complete_cmd(sc, f_obj,
ECORE_F_CMD_SWITCH_UPDATE))
@@ -4354,7 +4393,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
break;
}
} else {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"AFEX: ramrod completed FUNCTION_UPDATE");
f_obj->complete_cmd(sc, f_obj,
ECORE_F_CMD_AFEX_UPDATE);
@@ -4370,14 +4409,14 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_START:
- PMD_DRV_LOG(DEBUG, "got FUNC_START ramrod");
+ PMD_DRV_LOG(DEBUG, sc, "got FUNC_START ramrod");
if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_START)) {
break;
}
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_STOP:
- PMD_DRV_LOG(DEBUG, "got FUNC_STOP ramrod");
+ PMD_DRV_LOG(DEBUG, sc, "got FUNC_STOP ramrod");
if (f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_STOP)) {
break;
}
@@ -4389,7 +4428,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | BNX2X_STATE_OPENING_WAITING_PORT):
cid =
elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
- PMD_DRV_LOG(DEBUG, "got RSS_UPDATE ramrod. CID %d",
+ PMD_DRV_LOG(DEBUG, sc, "got RSS_UPDATE ramrod. CID %d",
cid);
rss_raw->clear_pending(rss_raw);
break;
@@ -4400,7 +4439,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got (un)set mac ramrod");
bnx2x_handle_classification_eqe(sc, elem);
break;
@@ -4408,7 +4447,7 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_MULTICAST_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got mcast ramrod");
bnx2x_handle_mcast_eqe(sc);
break;
@@ -4416,14 +4455,14 @@ static void bnx2x_eq_int(struct bnx2x_softc *sc)
case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_OPEN):
case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_DIAG):
case (EVENT_RING_OPCODE_FILTERS_RULES | BNX2X_STATE_CLOSING_WAITING_HALT):
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"got rx_mode ramrod");
bnx2x_handle_rx_mode_eqe(sc);
break;
default:
/* unknown event log error and continue */
- PMD_DRV_LOG(INFO, "Unknown EQ event %d, sc->state 0x%x",
+ PMD_DRV_LOG(INFO, sc, "Unknown EQ event %d, sc->state 0x%x",
elem->message.opcode, sc->state);
}
@@ -4449,12 +4488,16 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
uint16_t status;
int rc = 0;
+ PMD_DRV_LOG(DEBUG, sc, "---> SP TASK <---");
+
/* what work needs to be performed? */
status = bnx2x_update_dsb_idx(sc);
+ PMD_DRV_LOG(DEBUG, sc, "dsb status 0x%04x", status);
+
/* HW attentions */
if (status & BNX2X_DEF_SB_ATT_IDX) {
- PMD_DRV_LOG(DEBUG, "---> ATTN INTR <---");
+ PMD_DRV_LOG(DEBUG, sc, "---> ATTN INTR <---");
bnx2x_attn_int(sc);
status &= ~BNX2X_DEF_SB_ATT_IDX;
rc = 1;
@@ -4463,7 +4506,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
/* SP events: STAT_QUERY and others */
if (status & BNX2X_DEF_SB_IDX) {
/* handle EQ completions */
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "---> EQ INTR <---");
+ PMD_DRV_LOG(DEBUG, sc, "---> EQ INTR <---");
bnx2x_eq_int(sc);
bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
le16toh(sc->def_idx), IGU_INT_NOP, 1);
@@ -4472,7 +4515,7 @@ static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc)
/* if status is non zero then something went wrong */
if (unlikely(status)) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Got an unknown SP interrupt! (0x%04x)", status);
}
@@ -4488,7 +4531,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
struct bnx2x_softc *sc = fp->sc;
uint8_t more_rx = FALSE;
- PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
+ "---> FP TASK QUEUE (%d) <--", fp->index);
/* update the fastpath index */
bnx2x_update_fp_sb_idx(fp);
@@ -4538,25 +4582,31 @@ int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp)
return 0;
}
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "Interrupt status 0x%04x", status);
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "Interrupt status 0x%04x", status);
//bnx2x_dump_status_block(sc);
FOR_EACH_ETH_QUEUE(sc, i) {
fp = &sc->fp[i];
mask = (0x2 << (fp->index + CNIC_SUPPORT(sc)));
if (status & mask) {
+ /* acknowledge and disable further fastpath interrupts */
+ bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID,
+ 0, IGU_INT_DISABLE, 0);
bnx2x_handle_fp_tq(fp, scan_fp);
status &= ~mask;
}
}
if (unlikely(status & 0x1)) {
+ /* acknowledge and disable further slowpath interrupts */
+ bnx2x_ack_sb(sc, sc->igu_dsb_id, USTORM_ID,
+ 0, IGU_INT_DISABLE, 0);
rc = bnx2x_handle_sp_tq(sc);
status &= ~0x1;
}
if (unlikely(status)) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"Unexpected fastpath status (0x%08x)!", status);
}
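The bnx2x_ack_sb() calls added in this hunk acknowledge the fastpath or default status block with IGU_INT_DISABLE before the corresponding handler runs, so the interrupt cannot be re-raised while it is being serviced; the handlers later re-enable it when they acknowledge the updated index. A stubbed sketch of that ordering, with hypothetical helper names standing in for bnx2x_ack_sb()/bnx2x_handle_*_tq():

/* Sketch of the ack-then-handle ordering; helpers are stubs, not driver code. */
#include <stdint.h>
#include <stdio.h>

static void ack_and_disable(int sb_id)  { printf("ack + disable SB %d\n", sb_id); }
static void handle_events(int sb_id)    { printf("handle events on SB %d\n", sb_id); }
static void ack_and_reenable(int sb_id) { printf("ack + re-enable SB %d\n", sb_id); }

static void isr_sketch(uint16_t status, int sb_id)
{
	if (status & 0x1) {
		ack_and_disable(sb_id);		/* no re-raised IRQ can preempt the handler */
		handle_events(sb_id);
		ack_and_reenable(sb_id);	/* re-arm once the work is done */
	}
}

int main(void)
{
	isr_sketch(0x1, 0);
	return 0;
}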
@@ -4592,7 +4642,7 @@ static void bnx2x_init_func_obj(struct bnx2x_softc *sc)
{
sc->dmae_ready = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
ecore_init_func_obj(sc,
&sc->func_obj,
@@ -4608,7 +4658,7 @@ static int bnx2x_init_hw(struct bnx2x_softc *sc, uint32_t load_code)
struct ecore_func_state_params func_params = { NULL };
int rc;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* prepare the parameters for function state transitions */
bnx2x_set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -5197,7 +5247,7 @@ static void bnx2x_init_internal(struct bnx2x_softc *sc, uint32_t load_code)
break;
default:
- PMD_DRV_LOG(NOTICE, "Unknown load_code (0x%x) from MCP",
+ PMD_DRV_LOG(NOTICE, sc, "Unknown load_code (0x%x) from MCP",
load_code);
break;
}
@@ -5288,7 +5338,7 @@ bnx2x_extract_max_cfg(__rte_unused struct bnx2x_softc *sc, uint32_t mf_cfg)
FUNC_MF_CFG_MAX_BW_SHIFT);
if (!max_cfg) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Max BW configured to 0 - using 100 instead");
max_cfg = 100;
}
@@ -5552,7 +5602,7 @@ static void bnx2x_igu_int_enable(struct bnx2x_softc *sc)
val |= IGU_PF_CONF_FUNC_EN;
- PMD_DRV_LOG(DEBUG, "write 0x%x to IGU mode %s",
+ PMD_DRV_LOG(DEBUG, sc, "write 0x%x to IGU mode %s",
val, ((msix) ? "MSI-X" : ((msi) ? "MSI" : "INTx")));
REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
@@ -5600,7 +5650,7 @@ static void bnx2x_hc_int_disable(struct bnx2x_softc *sc)
REG_WR(sc, addr, val);
if (REG_RD(sc, addr) != val) {
- PMD_DRV_LOG(ERR, "proper val not read from HC IGU!");
+ PMD_DRV_LOG(ERR, sc, "proper val not read from HC IGU!");
}
}
@@ -5611,14 +5661,14 @@ static void bnx2x_igu_int_disable(struct bnx2x_softc *sc)
val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
IGU_PF_CONF_INT_LINE_EN | IGU_PF_CONF_ATTN_BIT_EN);
- PMD_DRV_LOG(DEBUG, "write %x to IGU", val);
+ PMD_DRV_LOG(DEBUG, sc, "write %x to IGU", val);
/* flush all outstanding writes */
mb();
REG_WR(sc, IGU_REG_PF_CONFIGURATION, val);
if (REG_RD(sc, IGU_REG_PF_CONFIGURATION) != val) {
- PMD_DRV_LOG(ERR, "proper val not read from IGU!");
+ PMD_DRV_LOG(ERR, sc, "proper val not read from IGU!");
}
}
@@ -5635,7 +5685,7 @@ static void bnx2x_nic_init(struct bnx2x_softc *sc, int load_code)
{
int i;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
for (i = 0; i < sc->num_queues; i++) {
bnx2x_init_eth_fp(sc, i);
@@ -5765,7 +5815,7 @@ static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state)
/* If there is no power capability, silently succeed */
if (!(sc->devinfo.pcie_cap_flags & BNX2X_PM_CAPABLE_FLAG)) {
- PMD_DRV_LOG(WARNING, "No power capability");
+ PMD_DRV_LOG(WARNING, sc, "No power capability");
return 0;
}
@@ -5810,7 +5860,7 @@ static int bnx2x_set_power_state(struct bnx2x_softc *sc, uint8_t state)
break;
default:
- PMD_DRV_LOG(NOTICE, "Can't support PCI power state = %d",
+ PMD_DRV_LOG(NOTICE, sc, "Can't support PCI power state = %d",
state);
return -1;
}
@@ -5828,7 +5878,7 @@ static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return FALSE;
@@ -5848,7 +5898,7 @@ static uint8_t bnx2x_trylock_hw_lock(struct bnx2x_softc *sc, uint32_t resource)
return TRUE;
}
- PMD_DRV_LOG(NOTICE, "Failed to get a resource lock 0x%x", resource);
+ PMD_DRV_LOG(NOTICE, sc, "Failed to get a resource lock 0x%x", resource);
return FALSE;
}
@@ -5941,7 +5991,7 @@ static int bnx2x_er_poll_igu_vq(struct bnx2x_softc *sc)
} while (cnt-- > 0);
if (cnt <= 0) {
- PMD_DRV_LOG(NOTICE, "Still pending IGU requests bits=0x%08x!",
+ PMD_DRV_LOG(NOTICE, sc, "Still pending IGU requests bits=0x%08x!",
pend_bits);
return -1;
}
@@ -6022,7 +6072,7 @@ static int bnx2x_init_shmem(struct bnx2x_softc *sc)
} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
- PMD_DRV_LOG(NOTICE, "BAD MCP validity signature");
+ PMD_DRV_LOG(NOTICE, sc, "BAD MCP validity signature");
return -1;
}
@@ -6177,7 +6227,7 @@ static int bnx2x_process_kill(struct bnx2x_softc *sc, uint8_t global)
} while (cnt-- > 0);
if (cnt <= 0) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"ERROR: Tetris buffer didn't get empty or there "
"are still outstanding read requests after 1s! "
"sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, "
@@ -6250,14 +6300,14 @@ static int bnx2x_leader_reset(struct bnx2x_softc *sc)
load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_REQ,
DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
if (!load_code) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
rc = -1;
goto exit_leader_reset;
}
if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
(load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"MCP unexpected response, aborting");
rc = -1;
goto exit_leader_reset2;
@@ -6265,7 +6315,7 @@ static int bnx2x_leader_reset(struct bnx2x_softc *sc)
load_code = bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
rc = -1;
goto exit_leader_reset2;
}
@@ -6273,7 +6323,7 @@ static int bnx2x_leader_reset(struct bnx2x_softc *sc)
/* try to recover after the failure */
if (bnx2x_process_kill(sc, global)) {
- PMD_DRV_LOG(NOTICE, "Something bad occurred on engine %d!",
+ PMD_DRV_LOG(NOTICE, sc, "Something bad occurred on engine %d!",
SC_PATH(sc));
rc = -1;
goto exit_leader_reset2;
@@ -6432,12 +6482,12 @@ bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
/* validate rings have enough entries to cross high thresholds */
if (sc->dropless_fc &&
pause->bd_th_hi + FW_PREFETCH_CNT > sc->rx_ring_size) {
- PMD_DRV_LOG(WARNING, "rx bd ring threshold limit");
+ PMD_DRV_LOG(WARNING, sc, "rx bd ring threshold limit");
}
if (sc->dropless_fc &&
pause->rcq_th_hi + FW_PREFETCH_CNT > USABLE_RCQ_ENTRIES(rxq)) {
- PMD_DRV_LOG(WARNING, "rcq ring threshold limit");
+ PMD_DRV_LOG(WARNING, sc, "rcq ring threshold limit");
}
pause->pri_map = 1;
@@ -6508,7 +6558,7 @@ bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t lea
struct ecore_queue_setup_params *setup_params = &q_params.params.setup;
int rc;
- PMD_DRV_LOG(DEBUG, "setting up queue %d", fp->index);
+ PMD_DRV_LOG(DEBUG, sc, "setting up queue %d", fp->index);
bnx2x_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
@@ -6526,11 +6576,11 @@ bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t lea
/* Change the state to INIT */
rc = ecore_queue_state_change(sc, &q_params);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Queue(%d) INIT failed", fp->index);
+ PMD_DRV_LOG(NOTICE, sc, "Queue(%d) INIT failed", fp->index);
return rc;
}
- PMD_DRV_LOG(DEBUG, "init complete");
+ PMD_DRV_LOG(DEBUG, sc, "init complete");
/* now move the Queue to the SETUP state */
memset(setup_params, 0, sizeof(*setup_params));
@@ -6554,7 +6604,7 @@ bnx2x_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, uint8_t lea
/* change the state to SETUP */
rc = ecore_queue_state_change(sc, &q_params);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Queue(%d) SETUP failed", fp->index);
+ PMD_DRV_LOG(NOTICE, sc, "Queue(%d) SETUP failed", fp->index);
return rc;
}
@@ -6682,11 +6732,11 @@ bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac,
rc = ecore_config_vlan_mac(sc, &ramrod_param);
if (rc == ECORE_EXISTS) {
- PMD_DRV_LOG(INFO, "Failed to schedule ADD operations (EEXIST)");
+ PMD_DRV_LOG(INFO, sc, "Failed to schedule ADD operations (EEXIST)");
/* do not treat adding same MAC as error */
rc = 0;
} else if (rc < 0) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"%s MAC failed (%d)", (set ? "Set" : "Delete"), rc);
}
@@ -6697,7 +6747,7 @@ static int bnx2x_set_eth_mac(struct bnx2x_softc *sc, uint8_t set)
{
unsigned long ramrod_flags = 0;
- PMD_DRV_LOG(DEBUG, "Adding Ethernet MAC");
+ PMD_DRV_LOG(DEBUG, sc, "Adding Ethernet MAC");
bnx2x_set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -6853,7 +6903,7 @@ bnx2x_fill_report_data(struct bnx2x_softc *sc, struct bnx2x_link_report_data *da
}
/* report link status to OS, should be called under phy_lock */
-static void bnx2x_link_report(struct bnx2x_softc *sc)
+static void bnx2x_link_report_locked(struct bnx2x_softc *sc)
{
struct bnx2x_link_report_data cur_data;
@@ -6874,14 +6924,19 @@ static void bnx2x_link_report(struct bnx2x_softc *sc)
return;
}
+ PMD_DRV_LOG(INFO, sc, "Change in link status : cur_data = %lx, last_reported_link = %lx\n",
+ cur_data.link_report_flags,
+ sc->last_reported_link.link_report_flags);
+
sc->link_cnt++;
+ PMD_DRV_LOG(INFO, sc, "link status change count = %x\n", sc->link_cnt);
/* report new link params and remember the state for the next time */
rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
&cur_data.link_report_flags)) {
- PMD_DRV_LOG(INFO, "NIC Link is Down");
+ PMD_DRV_LOG(INFO, sc, "NIC Link is Down");
} else {
__rte_unused const char *duplex;
__rte_unused const char *flow;
@@ -6921,12 +6976,20 @@ static void bnx2x_link_report(struct bnx2x_softc *sc)
flow = "none";
}
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"NIC Link is Up, %d Mbps %s duplex, Flow control: %s",
cur_data.line_speed, duplex, flow);
}
}
+static void
+bnx2x_link_report(struct bnx2x_softc *sc)
+{
+ bnx2x_acquire_phy_lock(sc);
+ bnx2x_link_report_locked(sc);
+ bnx2x_release_phy_lock(sc);
+}
+
void bnx2x_link_status_update(struct bnx2x_softc *sc)
{
if (sc->state != BNX2X_STATE_OPEN) {
@@ -6992,7 +7055,7 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
}
bnx2x_link_report(sc);
} else {
- bnx2x_link_report(sc);
+ bnx2x_link_report_locked(sc);
bnx2x_stats_handle(sc, STATS_EVENT_LINK_UP);
}
}
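bnx2x_link_report() is now split into bnx2x_link_report_locked(), which expects the caller to already hold the PHY lock, and a thin wrapper that takes and drops the lock around it; the branch above calls the _locked variant directly, presumably because that path already runs under the lock or must not retake it. A generic sketch of this locked/unlocked split (names illustrative, not the driver's):

/* Sketch of a locked worker plus an unlocked wrapper taking the lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t report_lock = PTHREAD_MUTEX_INITIALIZER;

static void link_report_locked(void)
{
	/* caller must already hold report_lock */
	printf("link state reported\n");
}

static void link_report(void)
{
	pthread_mutex_lock(&report_lock);
	link_report_locked();
	pthread_mutex_unlock(&report_lock);
}

int main(void)
{
	link_report();		/* ordinary callers go through the wrapper */
	return 0;
}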
@@ -7005,6 +7068,8 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
bnx2x_set_requested_fc(sc);
+ bnx2x_acquire_phy_lock(sc);
+
if (load_mode == LOAD_DIAG) {
lp->loopback_mode = ELINK_LOOPBACK_XGXS;
/* Prefer doing PHY loopback at 10G speed, if possible */
@@ -7024,6 +7089,8 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
rc = elink_phy_init(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
+
bnx2x_calc_fc_adv(sc);
if (sc->link_vars.link_up) {
@@ -7062,7 +7129,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
{
if ((sc->state != BNX2X_STATE_OPEN) ||
(atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
- PMD_DRV_LOG(INFO, "periodic callout exit (state=0x%x)",
+ PMD_DRV_LOG(INFO, sc, "periodic callout exit (state=0x%x)",
sc->state);
return;
}
@@ -7074,7 +7141,9 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
*/
mb();
if (sc->port.pmf) {
+ bnx2x_acquire_phy_lock(sc);
elink_period_func(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
}
}
#ifdef BNX2X_PULSE
@@ -7099,7 +7168,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
if ((drv_pulse != mcp_pulse) &&
(drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
/* someone lost a heartbeat... */
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"drv_pulse (0x%x) != mcp_pulse (0x%x)",
drv_pulse, mcp_pulse);
}
@@ -7115,7 +7184,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
uint32_t load_code = 0;
int i, rc = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->state = BNX2X_STATE_OPENING_WAITING_LOAD;
@@ -7169,7 +7238,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
goto bnx2x_nic_load_error2;
}
} else {
- PMD_DRV_LOG(INFO, "Device has no MCP!");
+ PMD_DRV_LOG(INFO, sc, "Device has no MCP!");
load_code = bnx2x_nic_load_no_mcp(sc);
}
@@ -7181,7 +7250,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
/* Initialize HW */
if (bnx2x_init_hw(sc, load_code) != 0) {
- PMD_DRV_LOG(NOTICE, "HW init failed");
+ PMD_DRV_LOG(NOTICE, sc, "HW init failed");
bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
sc->state = BNX2X_STATE_CLOSED;
rc = -ENXIO;
@@ -7201,7 +7270,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
sc->state = BNX2X_STATE_OPENING_WAITING_PORT;
rc = bnx2x_func_start(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Function start failed!");
+ PMD_DRV_LOG(NOTICE, sc, "Function start failed!");
bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
@@ -7212,7 +7281,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
load_code =
bnx2x_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0);
if (!load_code) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"MCP response failure, aborting");
sc->state = BNX2X_STATE_ERROR;
rc = -ENXIO;
@@ -7223,7 +7292,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
rc = bnx2x_setup_leading(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Setup leading failed!");
+ PMD_DRV_LOG(NOTICE, sc, "Setup leading failed!");
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7235,7 +7304,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
rc = bnx2x_vf_setup_queue(sc, &sc->fp[i], FALSE);
if (rc) {
- PMD_DRV_LOG(NOTICE, "Queue(%d) setup failed", i);
+ PMD_DRV_LOG(NOTICE, sc, "Queue(%d) setup failed", i);
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7243,7 +7312,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
rc = bnx2x_init_rss_pf(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "PF RSS init failed");
+ PMD_DRV_LOG(NOTICE, sc, "PF RSS init failed");
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7259,7 +7328,7 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
}
if (rc) {
- PMD_DRV_LOG(NOTICE, "Setting Ethernet MAC failed");
+ PMD_DRV_LOG(NOTICE, sc, "Setting Ethernet MAC failed");
sc->state = BNX2X_STATE_ERROR;
goto bnx2x_nic_load_error3;
}
@@ -7311,13 +7380,13 @@ int bnx2x_nic_load(struct bnx2x_softc *sc)
/* wait for all pending SP commands to complete */
if (IS_PF(sc) && !bnx2x_wait_sp_comp(sc, ~0x0UL)) {
- PMD_DRV_LOG(NOTICE, "Timeout waiting for all SPs to complete!");
+ PMD_DRV_LOG(NOTICE, sc, "Timeout waiting for all SPs to complete!");
bnx2x_periodic_stop(sc);
bnx2x_nic_unload(sc, UNLOAD_CLOSE, FALSE);
return -ENXIO;
}
- PMD_DRV_LOG(DEBUG, "NIC successfully loaded");
+ PMD_DRV_LOG(DEBUG, sc, "NIC successfully loaded");
return 0;
@@ -7366,7 +7435,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
/* Check if the driver is still running and bail out if it is. */
if (sc->state != BNX2X_STATE_CLOSED) {
- PMD_DRV_LOG(DEBUG, "Init called while driver is running!");
+ PMD_DRV_LOG(DEBUG, sc, "Init called while driver is running!");
rc = 0;
goto bnx2x_init_done;
}
@@ -7404,7 +7473,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
&& (!global ||!other_load_status))
&& bnx2x_trylock_leader_lock(sc)
&& !bnx2x_leader_reset(sc)) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"Recovered during init");
break;
}
@@ -7414,7 +7483,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
sc->recovery_state = BNX2X_RECOVERY_FAILED;
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Recovery flow hasn't properly "
"completed yet, try again later. "
"If you still see this message after a "
@@ -7433,7 +7502,7 @@ int bnx2x_init(struct bnx2x_softc *sc)
bnx2x_init_done:
if (rc) {
- PMD_DRV_LOG(NOTICE, "Initialization failed, "
+ PMD_DRV_LOG(NOTICE, sc, "Initialization failed, "
"stack notified driver is NOT running!");
}
@@ -7465,7 +7534,7 @@ static void bnx2x_get_function_num(struct bnx2x_softc *sc)
sc->pfunc_abs = (sc->pfunc_rel | sc->path_id);
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Relative function %d, Absolute function %d, Path %d",
sc->pfunc_rel, sc->pfunc_abs, sc->path_id);
}
@@ -7502,14 +7571,14 @@ static uint32_t bnx2x_pcie_capability_read(struct bnx2x_softc *sc, int reg)
/* ensure PCIe capability is enabled */
caps = pci_find_cap(sc, PCIY_EXPRESS, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found PCIe capability: "
+ PMD_DRV_LOG(DEBUG, sc, "Found PCIe capability: "
"id=0x%04X type=0x%04X addr=0x%08X",
caps->id, caps->type, caps->addr);
pci_read(sc, (caps->addr + reg), &ret, 2);
return ret;
}
- PMD_DRV_LOG(WARNING, "PCIe capability NOT FOUND!!!");
+ PMD_DRV_LOG(WARNING, sc, "PCIe capability NOT FOUND!!!");
return 0;
}
@@ -7527,7 +7596,7 @@ static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
*/
static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
{
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
struct bnx2x_pci_cap *caps;
uint16_t link_status;
@@ -7538,7 +7607,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
/* check if PCI Power Management is enabled */
caps = pci_find_cap(sc, PCIY_PMG, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found PM capability: "
+ PMD_DRV_LOG(DEBUG, sc, "Found PM capability: "
"id=0x%04X type=0x%04X addr=0x%08X",
caps->id, caps->type, caps->addr);
@@ -7552,7 +7621,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
sc->devinfo.pcie_link_width =
((link_status & PCIM_LINK_STA_WIDTH) >> 4);
- PMD_DRV_LOG(DEBUG, "PCIe link speed=%d width=%d",
+ PMD_DRV_LOG(DEBUG, sc, "PCIe link speed=%d width=%d",
sc->devinfo.pcie_link_speed, sc->devinfo.pcie_link_width);
sc->devinfo.pcie_cap_flags |= BNX2X_PCIE_CAPABLE_FLAG;
@@ -7560,7 +7629,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
/* check if MSI capability is enabled */
caps = pci_find_cap(sc, PCIY_MSI, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found MSI capability at 0x%04x", reg);
+ PMD_DRV_LOG(DEBUG, sc, "Found MSI capability at 0x%04x", reg);
sc->devinfo.pcie_cap_flags |= BNX2X_MSI_CAPABLE_FLAG;
sc->devinfo.pcie_msi_cap_reg = caps->addr;
@@ -7569,7 +7638,7 @@ static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
/* check if MSI-X capability is enabled */
caps = pci_find_cap(sc, PCIY_MSIX, BNX2X_PCI_CAP);
if (NULL != caps) {
- PMD_DRV_LOG(DEBUG, "Found MSI-X capability at 0x%04x", reg);
+ PMD_DRV_LOG(DEBUG, sc, "Found MSI-X capability at 0x%04x", reg);
sc->devinfo.pcie_cap_flags |= BNX2X_MSIX_CAPABLE_FLAG;
sc->devinfo.pcie_msix_cap_reg = caps->addr;
@@ -7589,7 +7658,7 @@ static int bnx2x_get_shmem_mf_cfg_info_sd(struct bnx2x_softc *sc)
mf_info->multi_vnics_mode = 1;
if (!VALID_OVLAN(mf_info->ext_id)) {
- PMD_DRV_LOG(NOTICE, "Invalid VLAN (%d)", mf_info->ext_id);
+ PMD_DRV_LOG(NOTICE, sc, "Invalid VLAN (%d)", mf_info->ext_id);
return 1;
}
@@ -7713,14 +7782,14 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
/* various MF mode sanity checks... */
if (mf_info->mf_config[SC_VN(sc)] & FUNC_MF_CFG_FUNC_HIDE) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Enumerated function %d is marked as hidden",
SC_PORT(sc));
return 1;
}
if ((mf_info->vnics_per_port > 1) && !mf_info->multi_vnics_mode) {
- PMD_DRV_LOG(NOTICE, "vnics_per_port=%d multi_vnics_mode=%d",
+ PMD_DRV_LOG(NOTICE, sc, "vnics_per_port=%d multi_vnics_mode=%d",
mf_info->vnics_per_port, mf_info->multi_vnics_mode);
return 1;
}
@@ -7728,13 +7797,13 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
if (mf_info->mf_mode == MULTI_FUNCTION_SD) {
/* vnic id > 0 must have valid ovlan in switch-dependent mode */
if ((SC_VN(sc) > 0) && !VALID_OVLAN(OVLAN(sc))) {
- PMD_DRV_LOG(NOTICE, "mf_mode=SD vnic_id=%d ovlan=%d",
+ PMD_DRV_LOG(NOTICE, sc, "mf_mode=SD vnic_id=%d ovlan=%d",
SC_VN(sc), OVLAN(sc));
return 1;
}
if (!VALID_OVLAN(OVLAN(sc)) && mf_info->multi_vnics_mode) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"mf_mode=SD multi_vnics_mode=%d ovlan=%d",
mf_info->multi_vnics_mode, OVLAN(sc));
return 1;
@@ -7753,7 +7822,7 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
&& !VALID_OVLAN(ovlan1))
|| ((!mf_info->multi_vnics_mode)
&& VALID_OVLAN(ovlan1)))) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"mf_mode=SD function %d MF config "
"mismatch, multi_vnics_mode=%d ovlan=%d",
i, mf_info->multi_vnics_mode,
@@ -7777,7 +7846,7 @@ static int bnx2x_check_valid_mf_cfg(struct bnx2x_softc *sc)
&& !(mf_cfg2 & FUNC_MF_CFG_FUNC_HIDE)
&& VALID_OVLAN(ovlan2)
&& (ovlan1 == ovlan2)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"mf_mode=SD functions %d and %d "
"have the same ovlan (%d)",
i, j, ovlan1);
@@ -7807,7 +7876,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
}
if (sc->devinfo.mf_cfg_base == SHMEM_MF_CFG_ADDR_NONE) {
- PMD_DRV_LOG(NOTICE, "Invalid mf_cfg_base!");
+ PMD_DRV_LOG(NOTICE, sc, "Invalid mf_cfg_base!");
return 1;
}
@@ -7825,7 +7894,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
if (mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT) {
mf_info->mf_mode = MULTI_FUNCTION_SI;
} else {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Invalid config for Switch Independent mode");
}
@@ -7841,7 +7910,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
mf_info->mf_mode = MULTI_FUNCTION_SD;
} else {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Invalid config for Switch Dependent mode");
}
@@ -7865,14 +7934,14 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
(mac_upper != FUNC_MF_CFG_UPPERMAC_DEFAULT)) {
mf_info->mf_mode = MULTI_FUNCTION_AFEX;
} else {
- PMD_DRV_LOG(NOTICE, "Invalid config for AFEX mode");
+ PMD_DRV_LOG(NOTICE, sc, "Invalid config for AFEX mode");
}
break;
default:
- PMD_DRV_LOG(NOTICE, "Unknown MF mode (0x%08x)",
+ PMD_DRV_LOG(NOTICE, sc, "Unknown MF mode (0x%08x)",
(val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK));
return 1;
@@ -7904,7 +7973,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
if (mf_info->mf_mode == SINGLE_FUNCTION) {
/* invalid MF config */
if (SC_VN(sc) >= 1) {
- PMD_DRV_LOG(NOTICE, "VNIC ID >= 1 in SF mode");
+ PMD_DRV_LOG(NOTICE, sc, "VNIC ID >= 1 in SF mode");
return 1;
}
@@ -7933,7 +8002,7 @@ static int bnx2x_get_mf_cfg_info(struct bnx2x_softc *sc)
default:
- PMD_DRV_LOG(NOTICE, "Get MF config failed (mf_mode=0x%08x)",
+ PMD_DRV_LOG(NOTICE, sc, "Get MF config failed (mf_mode=0x%08x)",
mf_info->mf_mode);
return 1;
}
@@ -7961,7 +8030,7 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
int port;
uint32_t mac_hi, mac_lo, val;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
port = SC_PORT(sc);
mac_hi = mac_lo = 0;
@@ -8035,7 +8104,7 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
if ((mac_lo == 0) && (mac_hi == 0)) {
*sc->mac_addr_str = 0;
- PMD_DRV_LOG(NOTICE, "No Ethernet address programmed!");
+ PMD_DRV_LOG(NOTICE, sc, "No Ethernet address programmed!");
} else {
sc->link_params.mac_addr[0] = (uint8_t) (mac_hi >> 8);
sc->link_params.mac_addr[1] = (uint8_t) (mac_hi);
@@ -8051,7 +8120,8 @@ static int bnx2x_get_shmem_info(struct bnx2x_softc *sc)
sc->link_params.mac_addr[3],
sc->link_params.mac_addr[4],
sc->link_params.mac_addr[5]);
- PMD_DRV_LOG(DEBUG, "Ethernet address: %s", sc->mac_addr_str);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Ethernet address: %s", sc->mac_addr_str);
}
return 0;
@@ -8066,24 +8136,24 @@ static void bnx2x_media_detect(struct bnx2x_softc *sc)
case ELINK_ETH_PHY_XFP_FIBER:
case ELINK_ETH_PHY_KR:
case ELINK_ETH_PHY_CX4:
- PMD_DRV_LOG(INFO, "Found 10GBase-CX4 media.");
+ PMD_DRV_LOG(INFO, sc, "Found 10GBase-CX4 media.");
sc->media = IFM_10G_CX4;
break;
case ELINK_ETH_PHY_DA_TWINAX:
- PMD_DRV_LOG(INFO, "Found 10Gb Twinax media.");
+ PMD_DRV_LOG(INFO, sc, "Found 10Gb Twinax media.");
sc->media = IFM_10G_TWINAX;
break;
case ELINK_ETH_PHY_BASE_T:
- PMD_DRV_LOG(INFO, "Found 10GBase-T media.");
+ PMD_DRV_LOG(INFO, sc, "Found 10GBase-T media.");
sc->media = IFM_10G_T;
break;
case ELINK_ETH_PHY_NOT_PRESENT:
- PMD_DRV_LOG(INFO, "Media not present.");
+ PMD_DRV_LOG(INFO, sc, "Media not present.");
sc->media = 0;
break;
case ELINK_ETH_PHY_UNSPECIFIED:
default:
- PMD_DRV_LOG(INFO, "Unknown media!");
+ PMD_DRV_LOG(INFO, sc, "Unknown media!");
sc->media = 0;
break;
}
@@ -8146,7 +8216,7 @@ static int bnx2x_get_igu_cam_info(struct bnx2x_softc *sc)
sc->igu_sb_cnt = min(sc->igu_sb_cnt, igu_sb_cnt);
if (igu_sb_cnt == 0) {
- PMD_DRV_LOG(ERR, "CAM configuration error");
+ PMD_DRV_LOG(ERR, sc, "CAM configuration error");
return -1;
}
@@ -8183,7 +8253,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
sc->devinfo.chip_id |= 0x1;
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"chip_id=0x%08x (num=0x%04x rev=0x%01x metal=0x%02x bond=0x%01x)",
sc->devinfo.chip_id,
((sc->devinfo.chip_id >> 16) & 0xffff),
@@ -8194,7 +8264,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
val = (REG_RD(sc, 0x2874) & 0x55);
if ((sc->devinfo.chip_id & 0x1) || (CHIP_IS_E1H(sc) && (val == 0x55))) {
sc->flags |= BNX2X_ONE_PORT_FLAG;
- PMD_DRV_LOG(DEBUG, "single port device");
+ PMD_DRV_LOG(DEBUG, sc, "single port device");
}
/* set the doorbell size */
@@ -8218,7 +8288,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
sc->devinfo.chip_port_mode =
(val) ? CHIP_4_PORT_MODE : CHIP_2_PORT_MODE;
- PMD_DRV_LOG(DEBUG, "Port mode = %s", (val) ? "4" : "2");
+ PMD_DRV_LOG(DEBUG, sc, "Port mode = %s", (val) ? "4" : "2");
}
/* get the function and path info for the device */
@@ -8233,7 +8303,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
if (!sc->devinfo.shmem_base) {
/* this should ONLY prevent upcoming shmem reads */
- PMD_DRV_LOG(INFO, "MCP not active");
+ PMD_DRV_LOG(INFO, sc, "MCP not active");
sc->flags |= BNX2X_NO_MCP_FLAG;
return 0;
}
@@ -8242,7 +8312,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
val = SHMEM_RD(sc, validity_map[SC_PORT(sc)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
(SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
- PMD_DRV_LOG(NOTICE, "Invalid SHMEM validity signature: 0x%08x",
+ PMD_DRV_LOG(NOTICE, sc, "Invalid SHMEM validity signature: 0x%08x",
val);
return 0;
}
@@ -8255,7 +8325,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
((sc->devinfo.bc_ver >> 24) & 0xff),
((sc->devinfo.bc_ver >> 16) & 0xff),
((sc->devinfo.bc_ver >> 8) & 0xff));
- PMD_DRV_LOG(INFO, "Bootcode version: %s", sc->devinfo.bc_ver_str);
+ PMD_DRV_LOG(INFO, sc, "Bootcode version: %s", sc->devinfo.bc_ver_str);
/* get the bootcode shmem address */
sc->devinfo.mf_cfg_base = bnx2x_get_shmem_mf_cfg_base(sc);
@@ -8310,7 +8380,7 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
}
if (REG_RD(sc, IGU_REG_RESET_MEMORIES)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"FORCING IGU Normal Mode failed!!!");
bnx2x_release_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
return -1;
@@ -8318,10 +8388,10 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
}
if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
- PMD_DRV_LOG(DEBUG, "IGU Backward Compatible Mode");
+ PMD_DRV_LOG(DEBUG, sc, "IGU Backward Compatible Mode");
sc->devinfo.int_block |= INT_BLOCK_MODE_BW_COMP;
} else {
- PMD_DRV_LOG(DEBUG, "IGU Normal Mode");
+ PMD_DRV_LOG(DEBUG, sc, "IGU Normal Mode");
}
rc = bnx2x_get_igu_cam_info(sc);
@@ -8395,7 +8465,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
}
if (!(sc->port.supported[0] || sc->port.supported[1])) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid phy config in NVRAM (PHY1=0x%08x PHY2=0x%08x)",
SHMEM_RD(sc,
dev_info.port_hw_config
@@ -8421,7 +8491,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
NIG_REG_XGXS0_CTRL_PHY_ADDR + port * 0x18);
break;
default:
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid switch config in"
"link_config=0x%08x",
sc->port.link_config[0]);
@@ -8429,7 +8499,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
}
}
- PMD_DRV_LOG(INFO, "PHY addr 0x%08x", sc->port.phy_addr);
+ PMD_DRV_LOG(INFO, sc, "PHY addr 0x%08x", sc->port.phy_addr);
/* mask what we support according to speed_cap_mask per configuration */
for (idx = 0; idx < cfg_size; idx++) {
@@ -8482,7 +8552,7 @@ bnx2x_link_settings_supported(struct bnx2x_softc *sc, uint32_t switch_cfg)
}
}
- PMD_DRV_LOG(INFO, "PHY supported 0=0x%08x 1=0x%08x",
+ PMD_DRV_LOG(INFO, sc, "PHY supported 0=0x%08x 1=0x%08x",
sc->port.supported[0], sc->port.supported[1]);
}
@@ -8541,7 +8611,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_10baseT_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8561,7 +8631,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_10baseT_Half | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8580,7 +8650,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_100baseT_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8600,7 +8670,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_100baseT_Half | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8618,7 +8688,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_1000baseT_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8636,7 +8706,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
sc->port.advertising[idx] |=
(ADVERTISED_2500baseX_Full | ADVERTISED_TP);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8655,7 +8725,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
(ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x",
link_config,
@@ -8670,7 +8740,7 @@ static void bnx2x_link_settings_requested(struct bnx2x_softc *sc)
break;
default:
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Invalid NVRAM config link_config=0x%08x "
"speed_cap_mask=0x%08x", link_config,
sc->link_params.speed_cap_mask[idx]);
@@ -8701,7 +8771,7 @@ static void bnx2x_get_phy_info(struct bnx2x_softc *sc)
uint8_t port = SC_PORT(sc);
uint32_t eee_mode;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* shmem data already read in bnx2x_get_shmem_info() */
@@ -8861,7 +8931,7 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
snprintf(buf, sizeof(buf), "fp_%d_sb", i);
if (bnx2x_dma_alloc(sc, sizeof(union bnx2x_host_hc_status_block),
&fp->sb_dma, buf, RTE_CACHE_LINE_SIZE) != 0) {
- PMD_DRV_LOG(NOTICE, "Failed to alloc %s", buf);
+ PMD_DRV_LOG(NOTICE, sc, "Failed to alloc %s", buf);
return -1;
} else {
if (CHIP_IS_E2E3(sc)) {
@@ -8951,7 +9021,7 @@ static int bnx2x_prev_mcp_done(struct bnx2x_softc *sc)
uint32_t rc = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_DONE,
DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
if (!rc) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
return -1;
}
@@ -8983,12 +9053,12 @@ static uint8_t bnx2x_prev_is_path_marked(struct bnx2x_softc *sc)
tmp = bnx2x_prev_path_get_entry(sc);
if (tmp) {
if (tmp->aer) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Path %d/%d/%d was marked by AER",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
} else {
rc = TRUE;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Path %d/%d/%d was already cleaned from previous drivers",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
}
@@ -9009,11 +9079,11 @@ static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi)
tmp = bnx2x_prev_path_get_entry(sc);
if (tmp) {
if (!tmp->aer) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Re-marking AER in path %d/%d/%d",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
} else {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Removing AER indication from path %d/%d/%d",
sc->pcie_bus, sc->pcie_device, SC_PATH(sc));
tmp->aer = 0;
@@ -9029,7 +9099,7 @@ static int bnx2x_prev_mark_path(struct bnx2x_softc *sc, uint8_t after_undi)
tmp = rte_malloc("", sizeof(struct bnx2x_prev_list_node),
RTE_CACHE_LINE_SIZE);
if (!tmp) {
- PMD_DRV_LOG(NOTICE, "Failed to allocate 'bnx2x_prev_list_node'");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to allocate 'bnx2x_prev_list_node'");
return -1;
}
@@ -9054,13 +9124,13 @@ static int bnx2x_do_flr(struct bnx2x_softc *sc)
/* only E2 and onwards support FLR */
if (CHIP_IS_E1x(sc)) {
- PMD_DRV_LOG(WARNING, "FLR not supported in E1H");
+ PMD_DRV_LOG(WARNING, sc, "FLR not supported in E1H");
return -1;
}
/* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
if (sc->devinfo.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"FLR not supported by BC_VER: 0x%08x",
sc->devinfo.bc_ver);
return -1;
@@ -9077,7 +9147,7 @@ static int bnx2x_do_flr(struct bnx2x_softc *sc)
}
}
- PMD_DRV_LOG(NOTICE, "PCIE transaction is not cleared, "
+ PMD_DRV_LOG(NOTICE, sc, "PCIE transaction is not cleared, "
"proceeding with reset anyway");
clear:
@@ -9225,7 +9295,7 @@ static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
tmp_reg = REG_RD(sc, DORQ_REG_NORM_CID_OFST);
if (tmp_reg == 0x7) {
- PMD_DRV_LOG(DEBUG, "UNDI previously loaded");
+ PMD_DRV_LOG(DEBUG, sc, "UNDI previously loaded");
prev_undi = TRUE;
/* clear the UNDI indication */
REG_WR(sc, DORQ_REG_NORM_CID_OFST, 0);
@@ -9244,7 +9314,7 @@ static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
break;
}
- PMD_DRV_LOG(DEBUG, "BRB still has 0x%08x", tmp_reg);
+ PMD_DRV_LOG(DEBUG, sc, "BRB still has 0x%08x", tmp_reg);
/* reset timer as long as BRB actually gets emptied */
if (prev_brb > tmp_reg) {
@@ -9262,7 +9332,7 @@ static int bnx2x_prev_unload_common(struct bnx2x_softc *sc)
}
if (!timer_count) {
- PMD_DRV_LOG(NOTICE, "Failed to empty BRB");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to empty BRB");
}
}
@@ -9317,7 +9387,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x_softc *sc)
return 0;
}
- PMD_DRV_LOG(INFO, "Could not FLR");
+ PMD_DRV_LOG(INFO, sc, "Could not FLR");
/* Close the MCP request, return failure */
rc = bnx2x_prev_mcp_done(sc);
@@ -9334,6 +9404,8 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
uint32_t fw, hw_lock_reg, hw_lock_val;
uint32_t rc = 0;
+ PMD_INIT_FUNC_TRACE(sc);
+
/*
* Clear HW from errors which may have resulted from an interrupted
* DMAE transaction.
@@ -9341,22 +9413,23 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
bnx2x_prev_interrupted_dmae(sc);
/* Release previously held locks */
- if (SC_FUNC(sc) <= 5)
- hw_lock_reg = (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8);
- else
- hw_lock_reg =
- (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
+ hw_lock_reg = (SC_FUNC(sc) <= 5) ?
+ (MISC_REG_DRIVER_CONTROL_1 + SC_FUNC(sc) * 8) :
+ (MISC_REG_DRIVER_CONTROL_7 + (SC_FUNC(sc) - 6) * 8);
hw_lock_val = (REG_RD(sc, hw_lock_reg));
if (hw_lock_val) {
if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held NVRAM lock\n");
REG_WR(sc, MCP_REG_MCPR_NVM_SW_ARB,
(MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << SC_PORT(sc)));
}
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held HW lock\n");
REG_WR(sc, hw_lock_reg, 0xffffffff);
}
if (MCPR_ACCESS_LOCK_LOCK & REG_RD(sc, MCP_REG_MCPR_ACCESS_LOCK)) {
+ PMD_DRV_LOG(DEBUG, sc, "Releasing previously held ALR\n");
REG_WR(sc, MCP_REG_MCPR_ACCESS_LOCK, 0);
}
@@ -9364,7 +9437,7 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
/* Lock MCP using an unload request */
fw = bnx2x_fw_command(sc, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
if (!fw) {
- PMD_DRV_LOG(NOTICE, "MCP response failure, aborting");
+ PMD_DRV_LOG(NOTICE, sc, "MCP response failure, aborting");
rc = -1;
break;
}
@@ -9384,7 +9457,7 @@ static int bnx2x_prev_unload(struct bnx2x_softc *sc)
} while (--time_counter);
if (!time_counter || rc) {
- PMD_DRV_LOG(NOTICE, "Failed to unload previous driver!");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to unload previous driver!");
rc = -1;
}
@@ -9401,7 +9474,7 @@ bnx2x_dcbx_set_state(struct bnx2x_softc *sc, uint8_t dcb_on, uint32_t dcbx_enabl
sc->dcb_state = FALSE;
sc->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"DCB state [%s:%s]",
dcb_on ? "ON" : "OFF",
(dcbx_enabled == BNX2X_DCBX_ENABLED_OFF) ? "user-mode" :
@@ -9434,7 +9507,7 @@ static void bnx2x_init_multi_cos(struct bnx2x_softc *sc)
if (cos < sc->max_cos) {
sc->prio_to_cos[pri] = cos;
} else {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"Invalid COS %d for priority %d "
"(max COS is %d), setting to 0", cos, pri,
(sc->max_cos - 1));
@@ -9455,7 +9528,7 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
cap = sc->pci_caps = rte_zmalloc("caps", sizeof(struct bnx2x_pci_cap),
RTE_CACHE_LINE_SIZE);
if (!cap) {
- PMD_DRV_LOG(NOTICE, "Failed to allocate memory");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory");
return -ENOMEM;
}
@@ -9466,7 +9539,7 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
pci_read(sc, PCIR_STATUS, &status, 2);
if (!(status & PCIM_STATUS_CAPPRESENT)) {
#endif
- PMD_DRV_LOG(NOTICE, "PCIe capability reading failed");
+ PMD_DRV_LOG(NOTICE, sc, "PCIe capability reading failed");
return -1;
}
@@ -9486,7 +9559,7 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
sizeof(struct bnx2x_pci_cap),
RTE_CACHE_LINE_SIZE);
if (!cap->next) {
- PMD_DRV_LOG(NOTICE, "Failed to allocate memory");
+ PMD_DRV_LOG(NOTICE, sc, "Failed to allocate memory");
return -ENOMEM;
}
cap = cap->next;
@@ -9522,25 +9595,25 @@ void bnx2x_load_firmware(struct bnx2x_softc *sc)
? FW_NAME_57711 : FW_NAME_57810;
f = open(fwname, O_RDONLY);
if (f < 0) {
- PMD_DRV_LOG(NOTICE, "Can't open firmware file");
+ PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file");
return;
}
if (fstat(f, &st) < 0) {
- PMD_DRV_LOG(NOTICE, "Can't stat firmware file");
+ PMD_DRV_LOG(NOTICE, sc, "Can't stat firmware file");
close(f);
return;
}
sc->firmware = rte_zmalloc("bnx2x_fw", st.st_size, RTE_CACHE_LINE_SIZE);
if (!sc->firmware) {
- PMD_DRV_LOG(NOTICE, "Can't allocate memory for firmware");
+ PMD_DRV_LOG(NOTICE, sc, "Can't allocate memory for firmware");
close(f);
return;
}
if (read(f, sc->firmware, st.st_size) != st.st_size) {
- PMD_DRV_LOG(NOTICE, "Can't read firmware data");
+ PMD_DRV_LOG(NOTICE, sc, "Can't read firmware data");
close(f);
return;
}
@@ -9548,10 +9621,11 @@ void bnx2x_load_firmware(struct bnx2x_softc *sc)
sc->fw_len = st.st_size;
if (sc->fw_len < FW_HEADER_LEN) {
- PMD_DRV_LOG(NOTICE, "Invalid fw size: %" PRIu64, sc->fw_len);
+ PMD_DRV_LOG(NOTICE, sc,
+ "Invalid fw size: %" PRIu64, sc->fw_len);
return;
}
- PMD_DRV_LOG(DEBUG, "fw_len = %" PRIu64, sc->fw_len);
+ PMD_DRV_LOG(DEBUG, sc, "fw_len = %" PRIu64, sc->fw_len);
}
static void
@@ -9618,11 +9692,11 @@ int bnx2x_attach(struct bnx2x_softc *sc)
{
int rc;
- PMD_DRV_LOG(DEBUG, "Starting attach...");
+ PMD_DRV_LOG(DEBUG, sc, "Starting attach...");
rc = bnx2x_pci_get_caps(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "PCIe caps reading was failed");
+ PMD_DRV_LOG(NOTICE, sc, "PCIe caps reading was failed");
return rc;
}
@@ -9661,7 +9735,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
/* get device info and set params */
if (bnx2x_get_device_info(sc) != 0) {
- PMD_DRV_LOG(NOTICE, "getting device info");
+ PMD_DRV_LOG(NOTICE, sc, "getting device info");
return -ENXIO;
}
@@ -9685,6 +9759,8 @@ int bnx2x_attach(struct bnx2x_softc *sc)
sc->fw_seq =
(SHMEM_RD(sc, func_mb[SC_FW_MB_IDX(sc)].drv_mb_header) &
DRV_MSG_SEQ_NUMBER_MASK);
+ PMD_DRV_LOG(DEBUG, sc, "prev unload fw_seq 0x%04x",
+ sc->fw_seq);
bnx2x_prev_unload(sc);
}
@@ -9760,7 +9836,7 @@ bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id,
mb();
- PMD_DRV_LOG(DEBUG, "write 0x%08x to IGU(via GRC) addr 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "write 0x%08x to IGU(via GRC) addr 0x%x",
ctl, igu_addr_ctl);
REG_WR(sc, igu_addr_ctl, ctl);
@@ -9772,7 +9848,7 @@ bnx2x_igu_clear_sb_gen(struct bnx2x_softc *sc, uint8_t func, uint8_t idu_sb_id,
}
if (!(REG_RD(sc, igu_addr_ack) & sb_bit)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Unable to finish IGU cleanup: "
"idu_sb_id %d offset %d bit %d (cnt %d)",
idu_sb_id, idu_sb_id / 32, idu_sb_id % 32, cnt);
@@ -9792,7 +9868,7 @@ static void bnx2x_reset_common(struct bnx2x_softc *sc)
{
uint32_t val = 0x1400;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* reset_common */
REG_WR(sc, (GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR),
@@ -9826,8 +9902,10 @@ static void bnx2x_common_init_phy(struct bnx2x_softc *sc)
shmem2_base[1] = SHMEM2_RD(sc, other_shmem2_base_addr);
}
+ bnx2x_acquire_phy_lock(sc);
elink_common_init_phy(sc, shmem_base, shmem2_base,
sc->devinfo.chip_id, 0);
+ bnx2x_release_phy_lock(sc);
}
static void bnx2x_pf_disable(struct bnx2x_softc *sc)
@@ -10001,7 +10079,8 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
uint8_t abs_func_id;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "starting common init for func %d", SC_ABS_FUNC(sc));
+ PMD_DRV_LOG(DEBUG, sc,
+ "starting common init for func %d", SC_ABS_FUNC(sc));
/*
* take the RESET lock to protect undi_unload flow from accessing
@@ -10084,12 +10163,12 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "PXP2 CFG failed");
+ PMD_DRV_LOG(NOTICE, sc, "PXP2 CFG failed");
return -1;
}
val = REG_RD(sc, PXP2_REG_RD_INIT_DONE);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "PXP2 RD_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "PXP2 RD_INIT failed");
return -1;
}
@@ -10211,7 +10290,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
} while (factor-- && (val != 1));
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "ATC_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "ATC_INIT failed");
return -1;
}
}
@@ -10349,7 +10428,7 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
if (sizeof(union cdu_context) != 1024) {
/* we currently assume that a context is 1024 bytes */
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"please adjust the size of cdu_context(%ld)",
(long)sizeof(union cdu_context));
}
@@ -10411,17 +10490,17 @@ static int bnx2x_init_hw_common(struct bnx2x_softc *sc)
/* finish CFC init */
val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "CFC LL_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "CFC LL_INIT failed");
return -1;
}
val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "CFC AC_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "CFC AC_INIT failed");
return -1;
}
val = reg_poll(sc, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
if (val != 1) {
- PMD_DRV_LOG(NOTICE, "CFC CAM_INIT failed");
+ PMD_DRV_LOG(NOTICE, sc, "CFC CAM_INIT failed");
return -1;
}
REG_WR(sc, CFC_REG_DEBUG0, 0);
@@ -10474,7 +10553,7 @@ static int bnx2x_init_hw_port(struct bnx2x_softc *sc)
uint32_t low, high;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "starting port init for port %d", port);
+ PMD_DRV_LOG(DEBUG, sc, "starting port init for port %d", port);
REG_WR(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, 0);
@@ -10701,7 +10780,7 @@ bnx2x_flr_clnup_poll_hw_counter(struct bnx2x_softc *sc, uint32_t reg,
uint32_t val = bnx2x_flr_clnup_reg_poll(sc, reg, 0, poll_cnt);
if (val != 0) {
- PMD_DRV_LOG(NOTICE, "%s usage count=%d", msg, val);
+ PMD_DRV_LOG(NOTICE, sc, "%s usage count=%d", msg, val);
return -1;
}
@@ -10793,7 +10872,7 @@ bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func,
int ret = 0;
if (REG_RD(sc, comp_addr)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(NOTICE, sc,
"Cleanup complete was not 0 before sending");
return -1;
}
@@ -10806,8 +10885,8 @@ bnx2x_send_final_clnup(struct bnx2x_softc *sc, uint8_t clnup_func,
REG_WR(sc, XSDM_REG_OPERATION_GEN, op_gen_command);
if (bnx2x_flr_clnup_reg_poll(sc, comp_addr, 1, poll_cnt) != 1) {
- PMD_DRV_LOG(NOTICE, "FW final cleanup did not succeed");
- PMD_DRV_LOG(DEBUG, "At timeout completion address contained %x",
+ PMD_DRV_LOG(NOTICE, sc, "FW final cleanup did not succeed");
+ PMD_DRV_LOG(DEBUG, sc, "At timeout completion address contained %x",
(REG_RD(sc, comp_addr)));
rte_panic("FLR cleanup failed");
return -1;
@@ -10923,28 +11002,30 @@ static void bnx2x_hw_enable_status(struct bnx2x_softc *sc)
__rte_unused uint32_t val;
val = REG_RD(sc, CFC_REG_WEAK_ENABLE_PF);
- PMD_DRV_LOG(DEBUG, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "CFC_REG_WEAK_ENABLE_PF is 0x%x", val);
val = REG_RD(sc, PBF_REG_DISABLE_PF);
- PMD_DRV_LOG(DEBUG, "PBF_REG_DISABLE_PF is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "PBF_REG_DISABLE_PF is 0x%x", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSI_EN);
- PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSI_EN is 0x%x", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_EN);
- PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_EN is 0x%x", val);
val = REG_RD(sc, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
- PMD_DRV_LOG(DEBUG, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x", val);
val = REG_RD(sc, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
- PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc,
+ "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x", val);
val = REG_RD(sc, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
- PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc,
+ "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x", val);
val = REG_RD(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
- PMD_DRV_LOG(DEBUG, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x",
val);
}
@@ -10988,7 +11069,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x_softc *sc)
/* Verify no pending pci transactions */
if (bnx2x_is_pcie_pending(sc)) {
- PMD_DRV_LOG(NOTICE, "PCIE Transactions still pending");
+ PMD_DRV_LOG(NOTICE, sc, "PCIE Transactions still pending");
}
/* Debug */
@@ -11015,13 +11096,13 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
int main_mem_width, rc;
uint32_t i;
- PMD_DRV_LOG(DEBUG, "starting func init for func %d", func);
+ PMD_DRV_LOG(DEBUG, sc, "starting func init for func %d", func);
/* FLR cleanup */
if (!CHIP_IS_E1x(sc)) {
rc = bnx2x_pf_flr_clnup(sc);
if (rc) {
- PMD_DRV_LOG(NOTICE, "FLR cleanup failed!");
+ PMD_DRV_LOG(NOTICE, sc, "FLR cleanup failed!");
return rc;
}
}
@@ -11268,7 +11349,7 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
val = REG_RD(sc, main_mem_prty_clr);
if (val) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Parity errors in HC block during function init (0x%x)!",
val);
}
@@ -11303,10 +11384,12 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
static void bnx2x_link_reset(struct bnx2x_softc *sc)
{
if (!BNX2X_NOMCP(sc)) {
+ bnx2x_acquire_phy_lock(sc);
elink_lfa_reset(&sc->link_params, &sc->link_vars);
+ bnx2x_release_phy_lock(sc);
} else {
if (!CHIP_REV_IS_SLOW(sc)) {
- PMD_DRV_LOG(WARNING,
+ PMD_DRV_LOG(WARNING, sc,
"Bootcode is missing - cannot reset link");
}
}
@@ -11336,7 +11419,7 @@ static void bnx2x_reset_port(struct bnx2x_softc *sc)
/* Check for BRB port occupancy */
val = REG_RD(sc, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port * 4);
if (val) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"BRB1 is not empty, %d blocks are occupied", val);
}
}
@@ -11530,10 +11613,10 @@ static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
int ret;
int data_begin = cut_gzip_prefix(zbuf, len);
- PMD_DRV_LOG(DEBUG, "ecore_gunzip %d", len);
+ PMD_DRV_LOG(DEBUG, sc, "ecore_gunzip %d", len);
if (data_begin <= 0) {
- PMD_DRV_LOG(NOTICE, "bad gzip prefix");
+ PMD_DRV_LOG(NOTICE, sc, "bad gzip prefix");
return -1;
}
@@ -11545,19 +11628,19 @@ static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
ret = inflateInit2(&zlib_stream, -MAX_WBITS);
if (ret != Z_OK) {
- PMD_DRV_LOG(NOTICE, "zlib inflateInit2 error");
+ PMD_DRV_LOG(NOTICE, sc, "zlib inflateInit2 error");
return ret;
}
ret = inflate(&zlib_stream, Z_FINISH);
if ((ret != Z_STREAM_END) && (ret != Z_OK)) {
- PMD_DRV_LOG(NOTICE, "zlib inflate error: %d %s", ret,
+ PMD_DRV_LOG(NOTICE, sc, "zlib inflate error: %d %s", ret,
zlib_stream.msg);
}
sc->gz_outlen = zlib_stream.total_out;
if (sc->gz_outlen & 0x3) {
- PMD_DRV_LOG(NOTICE, "firmware is not aligned. gz_outlen == %d",
+ PMD_DRV_LOG(NOTICE, sc, "firmware is not aligned. gz_outlen == %d",
sc->gz_outlen);
}
sc->gz_outlen >>= 2;
@@ -11587,6 +11670,7 @@ ecore_storm_memset_struct(struct bnx2x_softc *sc, uint32_t addr, size_t size,
}
}
+#ifdef RTE_LIBRTE_BNX2X_DEBUG
static const char *get_ext_phy_type(uint32_t ext_phy_type)
{
uint32_t phy_type_idx = ext_phy_type >> 8;
@@ -11667,6 +11751,7 @@ static const char *get_bnx2x_flags(uint32_t flags)
}
return flag_str;
}
+#endif
/*
* Prints useful adapter info.
@@ -11676,7 +11761,7 @@ void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
int i = 0;
__rte_unused uint32_t ext_phy_type;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (sc->link_vars.phy_flags & PHY_XGXS_FLAG)
ext_phy_type = ELINK_XGXS_EXT_PHY_TYPE(REG_RD(sc,
sc->
@@ -11695,97 +11780,102 @@ void bnx2x_print_adapter_info(struct bnx2x_softc *sc)
dev_info.port_hw_config
[0].external_phy_config)));
- PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
/* Hardware chip info. */
- PMD_INIT_LOG(DEBUG, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
- PMD_INIT_LOG(DEBUG, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
+ PMD_DRV_LOG(INFO, sc, "%12s : %#08x", "ASIC", sc->devinfo.chip_id);
+ PMD_DRV_LOG(INFO, sc, "%12s : %c%d", "Rev", (CHIP_REV(sc) >> 12) + 'A',
(CHIP_METAL(sc) >> 4));
/* Bus info. */
- PMD_INIT_LOG(DEBUG, "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %d, ", "Bus PCIe", sc->devinfo.pcie_link_width);
switch (sc->devinfo.pcie_link_speed) {
case 1:
- PMD_INIT_LOG(DEBUG, "%23s", "2.5 Gbps");
+ PMD_DRV_LOG(INFO, sc, "%23s", "2.5 Gbps");
break;
case 2:
- PMD_INIT_LOG(DEBUG, "%21s", "5 Gbps");
+ PMD_DRV_LOG(INFO, sc, "%21s", "5 Gbps");
break;
case 4:
- PMD_INIT_LOG(DEBUG, "%21s", "8 Gbps");
+ PMD_DRV_LOG(INFO, sc, "%21s", "8 Gbps");
break;
default:
- PMD_INIT_LOG(DEBUG, "%33s", "Unknown link speed");
+ PMD_DRV_LOG(INFO, sc, "%33s", "Unknown link speed");
}
/* Device features. */
- PMD_INIT_LOG(DEBUG, "%12s : ", "Flags");
+ PMD_DRV_LOG(INFO, sc, "%12s : ", "Flags");
/* Miscellaneous flags. */
if (sc->devinfo.pcie_cap_flags & BNX2X_MSI_CAPABLE_FLAG) {
- PMD_INIT_LOG(DEBUG, "%18s", "MSI");
+ PMD_DRV_LOG(INFO, sc, "%18s", "MSI");
i++;
}
if (sc->devinfo.pcie_cap_flags & BNX2X_MSIX_CAPABLE_FLAG) {
if (i > 0)
- PMD_INIT_LOG(DEBUG, "|");
- PMD_INIT_LOG(DEBUG, "%20s", "MSI-X");
+ PMD_DRV_LOG(INFO, sc, "|");
+ PMD_DRV_LOG(INFO, sc, "%20s", "MSI-X");
i++;
}
if (IS_PF(sc)) {
- PMD_INIT_LOG(DEBUG, "%12s : ", "Queues");
+ PMD_DRV_LOG(INFO, sc, "%12s : ", "Queues");
switch (sc->sp->rss_rdata.rss_mode) {
case ETH_RSS_MODE_DISABLED:
- PMD_INIT_LOG(DEBUG, "%19s", "None");
+ PMD_DRV_LOG(INFO, sc, "%19s", "None");
break;
case ETH_RSS_MODE_REGULAR:
- PMD_INIT_LOG(DEBUG, "%18s : %d", "RSS", sc->num_queues);
+ PMD_DRV_LOG(INFO, sc,
+ "%18s : %d", "RSS", sc->num_queues);
break;
default:
- PMD_INIT_LOG(DEBUG, "%22s", "Unknown");
+ PMD_DRV_LOG(INFO, sc, "%22s", "Unknown");
break;
}
}
/* RTE and Driver versions */
- PMD_INIT_LOG(DEBUG, "%12s : %s", "DPDK",
- rte_version());
- PMD_INIT_LOG(DEBUG, "%12s : %s", "Driver",
- bnx2x_pmd_version());
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "DPDK",
+ rte_version());
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Driver",
+ bnx2x_pmd_version());
/* Firmware versions and device features. */
- PMD_INIT_LOG(DEBUG, "%12s : %d.%d.%d",
+ PMD_DRV_LOG(INFO, sc, "%12s : %d.%d.%d",
"Firmware",
BNX2X_5710_FW_MAJOR_VERSION,
BNX2X_5710_FW_MINOR_VERSION,
BNX2X_5710_FW_REVISION_VERSION);
- PMD_INIT_LOG(DEBUG, "%12s : %s",
+ PMD_DRV_LOG(INFO, sc, "%12s : %s",
"Bootcode", sc->devinfo.bc_ver_str);
- PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
- PMD_INIT_LOG(DEBUG, "%12s : %u", "Bnx2x Func", sc->pcie_func);
- PMD_INIT_LOG(DEBUG, "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "DMAE Is",
+ PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc, "%12s : %u", "Bnx2x Func", sc->pcie_func);
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %s", "Bnx2x Flags", get_bnx2x_flags(sc->flags));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "DMAE Is",
(sc->dmae_ready ? "Ready" : "Not Ready"));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
- PMD_INIT_LOG(DEBUG, "%12s : %u", "MTU", sc->mtu);
- PMD_INIT_LOG(DEBUG, "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
- PMD_INIT_LOG(DEBUG, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "OVLAN", (OVLAN(sc) ? "YES" : "NO"));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "MF", (IS_MF(sc) ? "YES" : "NO"));
+ PMD_DRV_LOG(INFO, sc, "%12s : %u", "MTU", sc->mtu);
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %s", "PHY Type", get_ext_phy_type(ext_phy_type));
+ PMD_DRV_LOG(INFO, sc, "%12s : %x:%x:%x:%x:%x:%x", "MAC Addr",
sc->link_params.mac_addr[0],
sc->link_params.mac_addr[1],
sc->link_params.mac_addr[2],
sc->link_params.mac_addr[3],
sc->link_params.mac_addr[4],
sc->link_params.mac_addr[5]);
- PMD_INIT_LOG(DEBUG, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
- PMD_INIT_LOG(DEBUG, "%12s : %s", "State", get_state(sc->state));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "RX Mode", get_rx_mode(sc->rx_mode));
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "State", get_state(sc->state));
if (sc->recovery_state)
- PMD_INIT_LOG(DEBUG, "%12s : %s", "Recovery",
+ PMD_DRV_LOG(INFO, sc, "%12s : %s", "Recovery",
get_recovery_state(sc->recovery_state));
- PMD_INIT_LOG(DEBUG, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
+ PMD_DRV_LOG(INFO, sc, "%12s : CQ = %lx, EQ = %lx", "SPQ Left",
sc->cq_spq_left, sc->eq_spq_left);
- PMD_INIT_LOG(DEBUG, "%12s : %x", "Switch", sc->link_params.switch_cfg);
- PMD_INIT_LOG(DEBUG, "\n\n===================================\n");
+ PMD_DRV_LOG(INFO, sc,
+ "%12s : %x", "Switch", sc->link_params.switch_cfg);
+ PMD_DRV_LOG(INFO, sc, "\n\n===================================\n");
}
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index c93f148b..2843702e 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -727,6 +727,13 @@ struct bnx2x_port {
uint32_t phy_addr;
+ /* Used to synchronize phy accesses. */
+ rte_spinlock_t phy_mtx;
+ char phy_mtx_name[32];
+
+#define BNX2X_PHY_LOCK(sc) rte_spinlock_lock(&sc->port.phy_mtx)
+#define BNX2X_PHY_UNLOCK(sc) rte_spinlock_unlock(&sc->port.phy_mtx)
+
/*
* MCP scratchpad address for port specific statistics.
* The device is responsible for writing statistics
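The phy_mtx spinlock added in this hunk backs the bnx2x_acquire_phy_lock()/bnx2x_release_phy_lock() calls this patch wraps around elink_common_init_phy() and elink_lfa_reset() above. A minimal sketch of what such wrappers could look like, assuming they reduce to the per-port spinlock (the driver's real helpers may additionally take a hardware MDIO lock, cf. the HW_LOCK_RESOURCE_MDIO definition added to ecore_reg.h; the bodies below are illustrative, not the commit's code):

static inline void bnx2x_acquire_phy_lock(struct bnx2x_softc *sc)
{
	/* serialize PHY/MDIO access between link code and slow path */
	BNX2X_PHY_LOCK(sc);     /* rte_spinlock_lock(&sc->port.phy_mtx) */
}

static inline void bnx2x_release_phy_lock(struct bnx2x_softc *sc)
{
	BNX2X_PHY_UNLOCK(sc);   /* rte_spinlock_unlock(&sc->port.phy_mtx) */
}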
@@ -805,6 +812,10 @@ struct bnx2x_mf_info {
/* Device information data structure. */
struct bnx2x_devinfo {
+#if 1
+#define NAME_SIZE 128
+ char name[NAME_SIZE];
+#endif
/* PCIe info */
uint16_t vendor_id;
uint16_t device_id;
@@ -1420,7 +1431,7 @@ struct bnx2x_func_init_params {
static inline void
bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
{
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
rte_write8(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
@@ -1430,10 +1441,10 @@ bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
{
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 2) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 16-bit write to 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 16-bit write to 0x%08lx",
(unsigned long)offset);
#endif
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%04x",
(unsigned long)offset, val);
rte_write16(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
@@ -1444,11 +1455,11 @@ bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
{
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 4) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 32-bit write to 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 32-bit write to 0x%08lx",
(unsigned long)offset);
#endif
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
rte_write32(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
@@ -1459,7 +1470,7 @@ bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
uint8_t val;
val = rte_read8((uint8_t *)sc->bar[BAR0].base_addr + offset);
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
return val;
@@ -1472,12 +1483,12 @@ bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 2) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 16-bit read from 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 16-bit read from 0x%08lx",
(unsigned long)offset);
#endif
val = rte_read16(((uint8_t *)sc->bar[BAR0].base_addr + offset));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
return val;
@@ -1490,12 +1501,12 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
if ((offset % 4) != 0)
- PMD_DRV_LOG(NOTICE, "Unaligned 32-bit read from 0x%08lx",
+ PMD_DRV_LOG(NOTICE, sc, "Unaligned 32-bit read from 0x%08lx",
(unsigned long)offset);
#endif
val = rte_read32(((uint8_t *)sc->bar[BAR0].base_addr + offset));
- PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
return val;
@@ -1972,7 +1983,7 @@ bnx2x_set_rx_mode(struct bnx2x_softc *sc)
bnx2x_vf_set_rx_mode(sc);
}
} else {
- PMD_DRV_LOG(NOTICE, "Card is not ready to change mode");
+ PMD_DRV_LOG(NOTICE, sc, "Card is not ready to change mode");
}
}
@@ -1980,7 +1991,7 @@ static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
void *val, uint8_t size)
{
if (rte_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
- PMD_DRV_LOG(ERR, "Can't read from PCI config space");
+ PMD_DRV_LOG(ERR, sc, "Can't read from PCI config space");
return ENXIO;
}
@@ -1993,7 +2004,7 @@ static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
if (rte_pci_write_config(sc->pci_dev, &val16,
sizeof(val16), addr) <= 0) {
- PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ PMD_DRV_LOG(ERR, sc, "Can't write to PCI config space");
return ENXIO;
}
@@ -2005,7 +2016,7 @@ static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val)
uint32_t val32 = val;
if (rte_pci_write_config(sc->pci_dev, &val32,
sizeof(val32), addr) <= 0) {
- PMD_DRV_LOG(ERR, "Can't write to PCI config space");
+ PMD_DRV_LOG(ERR, sc, "Can't write to PCI config space");
return ENXIO;
}
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 650d6ce6..96151d0d 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -139,7 +139,7 @@ bnx2x_link_update(struct rte_eth_dev *dev)
struct rte_eth_link orig;
struct rte_eth_link link;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
bnx2x_link_status_update(sc);
memset(&orig, 0, sizeof(orig));
@@ -186,9 +186,11 @@ bnx2x_interrupt_handler(void *param)
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");
+ PMD_DEBUG_PERIODIC_LOG(INFO, sc, "Interrupt handled");
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
bnx2x_interrupt_action(dev);
+ atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
rte_intr_enable(&sc->pci_dev->intr_handle);
}
@@ -204,8 +206,8 @@ static void bnx2x_periodic_start(void *param)
ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
bnx2x_periodic_start, (void *)dev);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to start periodic"
- " timer rc %d", ret);
+ PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
+ " timer rc %d", ret);
assert(false && "Unable to start periodic timer");
}
}
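bnx2x_periodic_start() above re-arms itself through the EAL alarm API. A self-contained sketch of that self-rearming pattern, reusing BNX2X_SP_TIMER_PERIOD (microseconds) from the driver and assuming a hypothetical do_periodic_work() helper for the per-tick body:

#include <assert.h>
#include <rte_alarm.h>

static void periodic_cb(void *param)
{
	struct rte_eth_dev *dev = param;

	do_periodic_work(dev);          /* hypothetical per-tick work */

	/* rte_eal_alarm_set() is one-shot, so each tick schedules the next */
	if (rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD, periodic_cb, dev) != 0)
		assert(false && "Unable to re-arm periodic timer");
}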
@@ -229,7 +231,11 @@ static int
bnx2x_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
- PMD_INIT_FUNC_TRACE();
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
+ struct bnx2x_softc *sc = dev->data->dev_private;
+#endif
+
+ PMD_INIT_FUNC_TRACE(sc);
return bnx2x_link_update(dev);
}
@@ -245,7 +251,7 @@ bnx2xvf_dev_link_update(struct rte_eth_dev *dev,
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
- PMD_DRV_LOG(ERR, "PF indicated channel is down."
+ PMD_DRV_LOG(ERR, sc, "PF indicated channel is down."
"VF device is no longer operational");
dev->data->dev_link.link_status = ETH_LINK_DOWN;
}
@@ -259,34 +265,34 @@ bnx2x_dev_configure(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (dev->data->dev_conf.rxmode.jumbo_frame)
sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
- PMD_DRV_LOG(ERR, "The number of TX queues is greater than number of RX queues");
+ PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
return -EINVAL;
}
sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
if (sc->num_queues > mp_ncpus) {
- PMD_DRV_LOG(ERR, "The number of queues is more than number of CPUs");
+ PMD_DRV_LOG(ERR, sc, "The number of queues is more than number of CPUs");
return -EINVAL;
}
- PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
+ PMD_DRV_LOG(DEBUG, sc, "num_queues=%d, mtu=%d",
sc->num_queues, sc->mtu);
/* allocate ilt */
if (bnx2x_alloc_ilt_mem(sc) != 0) {
- PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem was failed");
+ PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_ilt_mem was failed");
return -ENXIO;
}
/* allocate the host hardware/software hsi structures */
if (bnx2x_alloc_hsi_mem(sc) != 0) {
- PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem was failed");
+ PMD_DRV_LOG(ERR, sc, "bnx2x_alloc_hsi_mem was failed");
bnx2x_free_ilt_mem(sc);
return -ENXIO;
}
@@ -300,7 +306,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
/* start the periodic callout */
if (sc->periodic_flags & PERIODIC_STOP)
@@ -308,7 +314,7 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
ret = bnx2x_init(sc);
if (ret) {
- PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
+ PMD_DRV_LOG(DEBUG, sc, "bnx2x_init failed (%d)", ret);
return -1;
}
@@ -317,12 +323,12 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
bnx2x_interrupt_handler, (void *)dev);
if (rte_intr_enable(&sc->pci_dev->intr_handle))
- PMD_DRV_LOG(ERR, "rte_intr_enable failed");
+ PMD_DRV_LOG(ERR, sc, "rte_intr_enable failed");
}
ret = bnx2x_dev_rx_init(dev);
if (ret != 0) {
- PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init returned error code");
+ PMD_DRV_LOG(DEBUG, sc, "bnx2x_dev_rx_init returned error code");
return -3;
}
@@ -338,7 +344,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
struct bnx2x_softc *sc = dev->data->dev_private;
int ret = 0;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (IS_PF(sc)) {
rte_intr_disable(&sc->pci_dev->intr_handle);
@@ -351,7 +357,7 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
if (ret) {
- PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
+ PMD_DRV_LOG(DEBUG, sc, "bnx2x_nic_unload failed (%d)", ret);
return;
}
@@ -369,7 +375,7 @@ bnx2x_dev_close(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
if (IS_VF(sc))
bnx2x_vf_close(sc);
@@ -389,7 +395,7 @@ bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
@@ -401,7 +407,7 @@ bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
@@ -413,7 +419,7 @@ bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
@@ -425,7 +431,7 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
@@ -440,7 +446,7 @@ bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
uint64_t brb_drops;
uint64_t brb_truncates;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);
@@ -631,27 +637,35 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
int ret = 0;
struct rte_pci_device *pci_dev;
+ struct rte_pci_addr pci_addr;
struct bnx2x_softc *sc;
- PMD_INIT_FUNC_TRACE();
+ /* Extract key data structures */
+ sc = eth_dev->data->dev_private;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
+
+ snprintf(sc->devinfo.name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u",
+ pci_addr.bus, pci_addr.devid, pci_addr.function,
+ eth_dev->data->port_id);
+
+ PMD_INIT_FUNC_TRACE(sc);
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
- sc = eth_dev->data->dev_private;
sc->pcie_bus = pci_dev->addr.bus;
sc->pcie_device = pci_dev->addr.devid;
- if (is_vf)
- sc->flags = BNX2X_IS_VF_FLAG;
-
sc->devinfo.vendor_id = pci_dev->id.vendor_id;
sc->devinfo.device_id = pci_dev->id.device_id;
sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;
+ if (is_vf)
+ sc->flags = BNX2X_IS_VF_FLAG;
+
sc->pcie_func = pci_dev->addr.function;
sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
if (is_vf)
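The snprintf() in the hunk above is what gives each adapter the name that the reworked log macros print. A standalone sketch of the resulting string, assuming PCI_SHORT_PRI_FMT keeps its rte_pci.h definition (three 8-bit hex fields, "bus:devid.function") and using a made-up address 3b:00.0 bound as ethdev port 0:

#include <stdio.h>
#include <rte_pci.h>       /* PCI_SHORT_PRI_FMT */

#define NAME_SIZE 128

static void build_name_example(void)
{
	char name[NAME_SIZE];

	/* made-up PCI address 3b:00.0, ethdev port 0 */
	snprintf(name, sizeof(name), PCI_SHORT_PRI_FMT ":dpdk-port-%u",
		 0x3b, 0x00, 0x0, 0u);
	/* name now holds "3b:00.0:dpdk-port-0" */
}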
@@ -679,7 +693,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
sc->pci_dev = pci_dev;
ret = bnx2x_attach(sc);
if (ret) {
- PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
+ PMD_DRV_LOG(ERR, sc, "bnx2x_attach failed (%d)", ret);
return ret;
}
@@ -688,21 +702,21 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
ret = rte_eal_alarm_set(BNX2X_SP_TIMER_PERIOD,
bnx2x_periodic_start, (void *)eth_dev);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to start periodic"
- " timer rc %d", ret);
+ PMD_DRV_LOG(ERR, sc, "Unable to start periodic"
+ " timer rc %d", ret);
return -EINVAL;
}
}
eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;
- PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
+ PMD_DRV_LOG(INFO, sc, "pcie_bus=%d, pcie_device=%d",
sc->pcie_bus, sc->pcie_device);
- PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
+ PMD_DRV_LOG(INFO, sc, "bar0.addr=%p, bar1.addr=%p",
sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
- PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
+ PMD_DRV_LOG(INFO, sc, "port=%d, path=%d, vnic=%d, func=%d",
PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
- PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
+ PMD_DRV_LOG(INFO, sc, "portID=%d vendorID=0x%x deviceID=0x%x",
eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);
if (IS_VF(sc)) {
@@ -742,14 +756,20 @@ out:
static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
- PMD_INIT_FUNC_TRACE();
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
+ struct bnx2x_softc *sc = eth_dev->data->dev_private;
+#endif
+ PMD_INIT_FUNC_TRACE(sc);
return bnx2x_common_dev_init(eth_dev, 0);
}
static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
- PMD_INIT_FUNC_TRACE();
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
+ struct bnx2x_softc *sc = eth_dev->data->dev_private;
+#endif
+ PMD_INIT_FUNC_TRACE(sc);
return bnx2x_common_dev_init(eth_dev, 1);
}
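The #ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT guards around the local sc pointers above exist because PMD_INIT_FUNC_TRACE(sc) compiles away to nothing when that option is off, which would otherwise leave sc as an unused variable. The pattern in isolation, with an illustrative function name:

static int example_init(struct rte_eth_dev *eth_dev)
{
#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
	/* only referenced by the trace macro; guarding the declaration
	 * avoids an unused-variable warning in non-debug builds */
	struct bnx2x_softc *sc = eth_dev->data->dev_private;
#endif
	PMD_INIT_FUNC_TRACE(sc);
	return 0;
}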
diff --git a/drivers/net/bnx2x/bnx2x_logs.h b/drivers/net/bnx2x/bnx2x_logs.h
index dff014d7..9667a889 100644
--- a/drivers/net/bnx2x/bnx2x_logs.h
+++ b/drivers/net/bnx2x/bnx2x_logs.h
@@ -11,13 +11,14 @@
#ifndef _PMD_LOGS_H_
#define _PMD_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+#define PMD_INIT_LOG(level, sc, fmt, args...) \
+ RTE_LOG(level, PMD, \
+ "[bnx2x_pmd: %s] %s() " fmt "\n", (sc)->devinfo.name, __func__, ##args)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#define PMD_INIT_FUNC_TRACE(sc) PMD_INIT_LOG(DEBUG, sc, " >>")
#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#define PMD_INIT_FUNC_TRACE(sc) do { } while(0)
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG_RX
@@ -42,20 +43,22 @@
#endif
#ifdef RTE_LIBRTE_BNX2X_DEBUG
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#define PMD_DRV_LOG_RAW(level, sc, fmt, args...) \
+ RTE_LOG(level, PMD, "[%s:%d(%s)] " fmt, __func__, __LINE__, \
+ (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#define PMD_DRV_LOG_RAW(level, sc, fmt, args...) do { } while (0)
#endif
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+#define PMD_DRV_LOG(level, sc, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, sc, fmt "\n", ## args)
#ifdef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
-#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(%s): " fmt "\n", __func__, \
+ (sc)->devinfo.name ? (sc)->devinfo.name : "", ## args)
#else
-#define PMD_DEBUG_PERIODIC_LOG(level, fmt, args...) do { } while(0)
+#define PMD_DEBUG_PERIODIC_LOG(level, sc, fmt, args...) do { } while (0)
#endif
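With bnx2x_logs.h reworked as above, every logging macro now takes the softc so it can print the per-adapter name. A usage sketch of the new signature and its approximate expansion when RTE_LIBRTE_BNX2X_DEBUG is enabled (line number and adapter name in the sample output are illustrative):

/* call site */
PMD_DRV_LOG(NOTICE, sc, "Can't open firmware file");

/* expands roughly to */
RTE_LOG(NOTICE, PMD, "[%s:%d(%s)] " "Can't open firmware file" "\n",
	__func__, __LINE__, sc->devinfo.name);

/* and prints something like:
 *   [bnx2x_load_firmware:9600(3b:00.0:dpdk-port-0)] Can't open firmware file
 * so messages from different ports of the same NIC can be told apart. */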
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index a0d4ac92..fdbc90e9 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -78,7 +78,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct bnx2x_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (NULL == rxq) {
- PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ PMD_DRV_LOG(ERR, sc, "rte_zmalloc for rxq failed!");
return -ENOMEM;
}
rxq->sc = sc;
@@ -94,7 +94,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev,
sc->rx_ring_size = USABLE_RX_BD(rxq);
rxq->nb_cq_pages = RCQ_BD_PAGES(rxq);
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, usable_bd=%lu, "
+ PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, usable_bd=%lu, "
"total_bd=%lu, rx_pages=%u, cq_pages=%u",
queue_idx, nb_desc, (unsigned long)USABLE_RX_BD(rxq),
(unsigned long)TOTAL_RX_BD(rxq), rxq->nb_rx_pages,
@@ -276,7 +276,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_free_thresh = min(txq->tx_free_thresh,
txq->nb_tx_desc - BDS_PER_TX_PKT);
- PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
+ PMD_DRV_LOG(DEBUG, sc, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
queue_idx, nb_desc, txq->tx_free_thresh,
(unsigned long)USABLE_TX_BD(txq),
@@ -302,7 +302,7 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
return -ENOMEM;
}
- /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+ /* PMD_DRV_LOG(DEBUG, sc, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); */
/* Link TX pages */
@@ -311,7 +311,9 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
busaddr = txq->tx_ring_phys_addr + BNX2X_PAGE_SIZE * (i % txq->nb_tx_pages);
tx_n_bd->addr_hi = rte_cpu_to_le_32(U64_HI(busaddr));
tx_n_bd->addr_lo = rte_cpu_to_le_32(U64_LO(busaddr));
- /* PMD_DRV_LOG(DEBUG, "link tx page %lu", (TOTAL_TX_BD_PER_PAGE * i - 1)); */
+ /* PMD_DRV_LOG(DEBUG, sc, "link tx page %lu",
+ * (TOTAL_TX_BD_PER_PAGE * i - 1));
+ */
}
txq->queue_id = queue_idx;
@@ -461,9 +463,12 @@ bnx2x_dev_rx_init(struct rte_eth_dev *dev)
void
bnx2x_dev_clear_queues(struct rte_eth_dev *dev)
{
+#ifdef RTE_LIBRTE_BNX2X_DEBUG_INIT
+ struct bnx2x_softc *sc = dev->data->dev_private;
+#endif
uint8_t i;
- PMD_INIT_FUNC_TRACE();
+ PMD_INIT_FUNC_TRACE(sc);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct bnx2x_tx_queue *txq = dev->data->tx_queues[i];
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index b9b85963..2ace81ad 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -84,7 +84,7 @@ bnx2x_storm_stats_post(struct bnx2x_softc *sc)
sc->fw_stats_req->hdr.drv_stats_counter =
htole16(sc->stats_counter++);
- PMD_DEBUG_PERIODIC_LOG(DEBUG,
+ PMD_DEBUG_PERIODIC_LOG(DEBUG, sc,
"sending statistics ramrod %d",
le16toh(sc->fw_stats_req->hdr.drv_stats_counter));
@@ -156,7 +156,7 @@ bnx2x_stats_comp(struct bnx2x_softc *sc)
while (*stats_comp != DMAE_COMP_VAL) {
if (!cnt) {
- PMD_DRV_LOG(ERR, "Timeout waiting for stats finished");
+ PMD_DRV_LOG(ERR, sc, "Timeout waiting for stats finished");
break;
}
@@ -191,7 +191,7 @@ bnx2x_stats_pmf_update(struct bnx2x_softc *sc)
}
/* sanity */
if (!sc->port.pmf || !sc->port.port_stx) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -241,7 +241,7 @@ bnx2x_port_stats_init(struct bnx2x_softc *sc)
/* sanity */
if (!sc->link_vars.link_up || !sc->port.pmf) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -465,7 +465,7 @@ bnx2x_func_stats_init(struct bnx2x_softc *sc)
/* sanity */
if (!sc->func_stx) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -799,12 +799,12 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc)
break;
case ELINK_MAC_TYPE_NONE: /* unreached */
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats updated by DMAE but no MAC active");
return -1;
default: /* unreached */
- PMD_DRV_LOG(ERR, "stats update failed, unknown MAC type");
+ PMD_DRV_LOG(ERR, sc, "stats update failed, unknown MAC type");
}
ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
@@ -839,7 +839,7 @@ bnx2x_hw_stats_update(struct bnx2x_softc *sc)
nig_timer_max = SHMEM_RD(sc, port_mb[SC_PORT(sc)].stat_nig_timer);
if (nig_timer_max != estats->nig_timer_max) {
estats->nig_timer_max = nig_timer_max;
- PMD_DRV_LOG(ERR, "invalid NIG timer max (%u)",
+ PMD_DRV_LOG(ERR, sc, "invalid NIG timer max (%u)",
estats->nig_timer_max);
}
}
@@ -861,7 +861,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
/* are storm stats valid? */
if (le16toh(counters->xstats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by xstorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->xstats_counter), sc->stats_counter);
@@ -869,7 +869,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
}
if (le16toh(counters->ustats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by ustorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->ustats_counter), sc->stats_counter);
@@ -877,7 +877,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
}
if (le16toh(counters->cstats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by cstorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->cstats_counter), sc->stats_counter);
@@ -885,7 +885,7 @@ bnx2x_storm_stats_validate_counters(struct bnx2x_softc *sc)
}
if (le16toh(counters->tstats_counter) != cur_stats_counter) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"stats not updated by tstorm, "
"counter 0x%x != stats_counter 0x%x",
le16toh(counters->tstats_counter), sc->stats_counter);
@@ -931,12 +931,13 @@ bnx2x_storm_stats_update(struct bnx2x_softc *sc)
uint32_t diff;
- /* PMD_DRV_LOG(DEBUG,
+ /* PMD_DRV_LOG(DEBUG, sc,
"queue[%d]: ucast_sent 0x%x bcast_sent 0x%x mcast_sent 0x%x",
i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent,
xclient->mcast_pkts_sent);
- PMD_DRV_LOG(DEBUG, "---------------"); */
+ PMD_DRV_LOG(DEBUG, sc, "---------------");
+ */
UPDATE_QSTAT(tclient->rcv_bcast_bytes,
total_broadcast_bytes_received);
@@ -1290,7 +1291,7 @@ void bnx2x_stats_handle(struct bnx2x_softc *sc, enum bnx2x_stats_event event)
bnx2x_stats_stm[state][event].action(sc);
if (event != STATS_EVENT_UPDATE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"state %d -> event %d -> state %d",
state, event, sc->stats_state);
}
@@ -1304,7 +1305,7 @@ bnx2x_port_stats_base_init(struct bnx2x_softc *sc)
/* sanity */
if (!sc->port.pmf || !sc->port.port_stx) {
- PMD_DRV_LOG(ERR, "BUG!");
+ PMD_DRV_LOG(ERR, sc, "BUG!");
return;
}
@@ -1476,7 +1477,7 @@ bnx2x_stats_init(struct bnx2x_softc *sc)
sc->func_stx = 0;
}
- PMD_DRV_LOG(DEBUG, "port_stx 0x%x func_stx 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "port_stx 0x%x func_stx 0x%x",
sc->port.port_stx, sc->func_stx);
/* pmf should retrieve port statistics from SP on a non-init*/
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index 3c08f2a2..dba751c8 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -40,12 +40,12 @@ bnx2x_check_bull(struct bnx2x_softc *sc)
if (bull->crc == bnx2x_vf_crc(bull))
break;
- PMD_DRV_LOG(ERR, "bad crc on bulletin board. contained %x computed %x",
+ PMD_DRV_LOG(ERR, sc, "bad crc on bulletin board. contained %x computed %x",
bull->crc, bnx2x_vf_crc(bull));
++tries;
}
if (tries == BNX2X_VF_BULLETIN_TRIES) {
- PMD_DRV_LOG(ERR, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
+ PMD_DRV_LOG(ERR, sc, "pf to vf bulletin board crc was wrong %d consecutive times. Aborting",
tries);
return FALSE;
}
@@ -85,7 +85,7 @@ bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
rte_spinlock_lock(&sc->vf2pf_lock);
- PMD_DRV_LOG(DEBUG, "Preparing %d tlv for sending", type);
+ PMD_DRV_LOG(DEBUG, sc, "Preparing %d tlv for sending", type);
memset(mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));
@@ -100,7 +100,7 @@ static void
bnx2x_vf_finalize(struct bnx2x_softc *sc,
__rte_unused struct vf_first_tlv *first_tlv)
{
- PMD_DRV_LOG(DEBUG, "done sending [%d] tlv over vf pf channel",
+ PMD_DRV_LOG(DEBUG, sc, "done sending [%d] tlv over vf pf channel",
first_tlv->tl.type);
rte_spinlock_unlock(&sc->vf2pf_lock);
@@ -119,14 +119,14 @@ bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
uint8_t i;
if (*status) {
- PMD_DRV_LOG(ERR, "status should be zero before message"
+ PMD_DRV_LOG(ERR, sc, "status should be zero before message"
" to pf was sent");
return -EINVAL;
}
bnx2x_check_bull(sc);
if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
- PMD_DRV_LOG(ERR, "channel is down. Aborting message sending");
+ PMD_DRV_LOG(ERR, sc, "channel is down. Aborting message sending");
return -EINVAL;
}
@@ -146,11 +146,11 @@ bnx2x_do_req4pf(struct bnx2x_softc *sc, rte_iova_t phys_addr)
}
if (!*status) {
- PMD_DRV_LOG(ERR, "Response from PF timed out");
+ PMD_DRV_LOG(ERR, sc, "Response from PF timed out");
return -EAGAIN;
}
- PMD_DRV_LOG(DEBUG, "Response from PF was received");
+ PMD_DRV_LOG(DEBUG, sc, "Response from PF was received");
return 0;
}
@@ -198,7 +198,7 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
int rc;
do {
- PMD_DRV_LOG(DEBUG, "trying to get resources");
+ PMD_DRV_LOG(DEBUG, sc, "trying to get resources");
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc)
@@ -210,11 +210,11 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
/* check PF to request acceptance */
if (sc_resp->status == BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(DEBUG, "resources obtained successfully");
+ PMD_DRV_LOG(DEBUG, sc, "resources obtained successfully");
res_obtained = true;
} else if (sc_resp->status == BNX2X_VF_STATUS_NO_RESOURCES &&
tries < BNX2X_VF_OBTAIN_MAX_TRIES) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"PF cannot allocate requested amount of resources");
res_query = &sc->vf2pf_mbox->query[0].acquire.res_query;
@@ -230,7 +230,7 @@ int bnx2x_loop_obtain_resources(struct bnx2x_softc *sc)
memset(&sc->vf2pf_mbox->resp, 0, sizeof(union resp_tlvs));
} else {
- PMD_DRV_LOG(ERR, "Failed to get the requested "
+ PMD_DRV_LOG(ERR, sc, "Failed to get the requested "
"amount of resources: %d.",
sc_resp->status);
return -EINVAL;
@@ -299,7 +299,7 @@ int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_
sc->doorbell_size = sc_resp.db_size;
sc->flags |= BNX2X_NO_WOL_FLAG | BNX2X_NO_ISCSI_OOO_FLAG | BNX2X_NO_ISCSI_FLAG | BNX2X_NO_FCOE_FLAG;
- PMD_DRV_LOG(DEBUG, "status block count = %d, base status block = %x",
+ PMD_DRV_LOG(DEBUG, sc, "status block count = %d, base status block = %x",
sc->igu_sb_cnt, sc->igu_base_sb);
strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver));
@@ -336,7 +336,7 @@ bnx2x_vf_close(struct bnx2x_softc *sc)
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
- PMD_DRV_LOG(ERR, "Failed to release VF");
+ PMD_DRV_LOG(ERR, sc, "Failed to release VF");
bnx2x_vf_finalize(sc, &query->first_tlv);
}
@@ -370,12 +370,12 @@ bnx2x_vf_init(struct bnx2x_softc *sc)
if (rc)
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to init VF");
+ PMD_DRV_LOG(ERR, sc, "Failed to init VF");
rc = -EINVAL;
goto out;
}
- PMD_DRV_LOG(DEBUG, "VF was initialized");
+ PMD_DRV_LOG(DEBUG, sc, "VF was initialized");
out:
bnx2x_vf_finalize(sc, &query->first_tlv);
return rc;
@@ -406,7 +406,7 @@ bnx2x_vf_unload(struct bnx2x_softc *sc)
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad reply for vf_q %d teardown", i);
bnx2x_vf_finalize(sc, &query_op->first_tlv);
@@ -426,7 +426,7 @@ bnx2x_vf_unload(struct bnx2x_softc *sc)
rc = bnx2x_do_req4pf(sc, sc->vf2pf_mbox_mapping.paddr);
if (rc || reply->status != BNX2X_VF_STATUS_SUCCESS)
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad reply from PF for close message");
bnx2x_vf_finalize(sc, &query->first_tlv);
@@ -453,7 +453,7 @@ bnx2x_vf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
rxq = sc->rx_queues[fp->index];
if (!rxq) {
- PMD_DRV_LOG(ERR, "RX queue %d is NULL", fp->index);
+ PMD_DRV_LOG(ERR, sc, "RX queue %d is NULL", fp->index);
return;
}
@@ -477,7 +477,7 @@ bnx2x_vf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
txq = sc->tx_queues[fp->index];
if (!txq) {
- PMD_DRV_LOG(ERR, "TX queue %d is NULL", fp->index);
+ PMD_DRV_LOG(ERR, sc, "TX queue %d is NULL", fp->index);
return;
}
@@ -514,7 +514,7 @@ bnx2x_vf_setup_queue(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp, int lead
if (rc)
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to setup VF queue[%d]",
+ PMD_DRV_LOG(ERR, sc, "Failed to setup VF queue[%d]",
fp->index);
rc = -EINVAL;
}
@@ -569,7 +569,7 @@ bnx2x_vf_set_mac(struct bnx2x_softc *sc, int set)
}
if (BNX2X_VF_STATUS_SUCCESS != reply->status) {
- PMD_DRV_LOG(ERR, "Bad reply from PF for SET MAC message: %d",
+ PMD_DRV_LOG(ERR, sc, "Bad reply from PF for SET MAC message: %d",
reply->status);
rc = -EINVAL;
}
@@ -611,7 +611,7 @@ bnx2x_vf_config_rss(struct bnx2x_softc *sc,
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure RSS");
+ PMD_DRV_LOG(ERR, sc, "Failed to configure RSS");
rc = -EINVAL;
}
out:
@@ -655,7 +655,7 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
break;
default:
- PMD_DRV_LOG(ERR, "BAD rx mode (%d)", sc->rx_mode);
+ PMD_DRV_LOG(ERR, sc, "BAD rx mode (%d)", sc->rx_mode);
rc = -EINVAL;
goto out;
}
@@ -669,7 +669,7 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
goto out;
if (reply->status != BNX2X_VF_STATUS_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to set RX mode");
+ PMD_DRV_LOG(ERR, sc, "Failed to set RX mode");
rc = -EINVAL;
}
diff --git a/drivers/net/bnx2x/ecore_init.h b/drivers/net/bnx2x/ecore_init.h
index 4576c565..9f5c7f7f 100644
--- a/drivers/net/bnx2x/ecore_init.h
+++ b/drivers/net/bnx2x/ecore_init.h
@@ -743,7 +743,7 @@ static inline void ecore_disable_blocks_parity(struct bnx2x_softc *sc)
if (dis_mask) {
REG_WR(sc, ecore_blocks_parity_data[i].mask_addr,
dis_mask);
- ECORE_MSG("Setting parity mask "
+ ECORE_MSG(sc, "Setting parity mask "
"for %s to\t\t0x%x",
ecore_blocks_parity_data[i].name, dis_mask);
}
@@ -778,7 +778,7 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
reg_val = REG_RD(sc, ecore_blocks_parity_data[i].
sts_clr_addr);
if (reg_val & reg_mask)
- ECORE_MSG("Parity errors in %s: 0x%x",
+ ECORE_MSG(sc, "Parity errors in %s: 0x%x",
ecore_blocks_parity_data[i].name,
reg_val & reg_mask);
}
@@ -787,7 +787,7 @@ static inline void ecore_clear_blocks_parity(struct bnx2x_softc *sc)
/* Check if there were parity attentions in MCP */
reg_val = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_MCP);
if (reg_val & mcp_aeu_bits)
- ECORE_MSG("Parity error in MCP: 0x%x",
+ ECORE_MSG(sc, "Parity error in MCP: 0x%x",
reg_val & mcp_aeu_bits);
/* Clear parity attentions in MCP:
diff --git a/drivers/net/bnx2x/ecore_init_ops.h b/drivers/net/bnx2x/ecore_init_ops.h
index b6f98324..474185bd 100644
--- a/drivers/net/bnx2x/ecore_init_ops.h
+++ b/drivers/net/bnx2x/ecore_init_ops.h
@@ -426,20 +426,20 @@ static void ecore_init_pxp_arb(struct bnx2x_softc *sc, int r_order,
uint32_t val, i;
if (r_order > MAX_RD_ORD) {
- ECORE_MSG("read order of %d order adjusted to %d",
+ ECORE_MSG(sc, "read order of %d order adjusted to %d",
r_order, MAX_RD_ORD);
r_order = MAX_RD_ORD;
}
if (w_order > MAX_WR_ORD) {
- ECORE_MSG("write order of %d order adjusted to %d",
+ ECORE_MSG(sc, "write order of %d order adjusted to %d",
w_order, MAX_WR_ORD);
w_order = MAX_WR_ORD;
}
if (CHIP_REV_IS_FPGA(sc)) {
- ECORE_MSG("write order adjusted to 1 for FPGA");
+ ECORE_MSG(sc, "write order adjusted to 1 for FPGA");
w_order = 0;
}
- ECORE_MSG("read order %d write order %d", r_order, w_order);
+ ECORE_MSG(sc, "read order %d write order %d", r_order, w_order);
for (i = 0; i < NUM_RD_Q-1; i++) {
REG_WR(sc, read_arb_addr[i].l, read_arb_data[i][r_order].l);
diff --git a/drivers/net/bnx2x/ecore_reg.h b/drivers/net/bnx2x/ecore_reg.h
index d8203b45..33cea4eb 100644
--- a/drivers/net/bnx2x/ecore_reg.h
+++ b/drivers/net/bnx2x/ecore_reg.h
@@ -1969,6 +1969,7 @@
#define HW_LOCK_MAX_RESOURCE_VALUE 31
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
+#define HW_LOCK_RESOURCE_MDIO 0
#define HW_LOCK_RESOURCE_NVRAM 12
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index ef7f9fea..4e400d51 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -55,14 +55,14 @@ ecore_exe_queue_init(struct bnx2x_softc *sc __rte_unused,
o->execute = exec;
o->get = get;
- ECORE_MSG("Setup the execution queue with the chunk length of %d",
+ ECORE_MSG(sc, "Setup the execution queue with the chunk length of %d",
exe_len);
}
static void ecore_exe_queue_free_elem(struct bnx2x_softc *sc __rte_unused,
struct ecore_exeq_elem *elem)
{
- ECORE_MSG("Deleting an exe_queue element");
+ ECORE_MSG(sc, "Deleting an exe_queue element");
ECORE_FREE(sc, elem, sizeof(*elem));
}
@@ -108,7 +108,7 @@ static int ecore_exe_queue_add(struct bnx2x_softc *sc,
/* Check if this request is ok */
rc = o->validate(sc, o->owner, elem);
if (rc) {
- ECORE_MSG("Preamble failed: %d", rc);
+ ECORE_MSG(sc, "Preamble failed: %d", rc);
goto free_and_exit;
}
}
@@ -178,8 +178,8 @@ static int ecore_exe_queue_step(struct bnx2x_softc *sc,
*/
if (!ECORE_LIST_IS_EMPTY(&o->pending_comp)) {
if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- ECORE_MSG
- ("RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
+ ECORE_MSG(sc,
+ "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list");
__ecore_exe_queue_reset_pending(sc, o);
} else {
return ECORE_PENDING;
@@ -242,7 +242,7 @@ static struct ecore_exeq_elem *ecore_exe_queue_alloc_elem(struct
bnx2x_softc *sc
__rte_unused)
{
- ECORE_MSG("Allocating a new exe_queue element");
+ ECORE_MSG(sc, "Allocating a new exe_queue element");
return ECORE_ZALLOC(sizeof(struct ecore_exeq_elem), GFP_ATOMIC, sc);
}
@@ -292,14 +292,14 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
if (CHIP_REV_IS_EMUL(sc))
cnt *= 20;
- ECORE_MSG("waiting for state to become %d", state);
+ ECORE_MSG(sc, "waiting for state to become %d", state);
ECORE_MIGHT_SLEEP();
while (cnt--) {
bnx2x_intr_legacy(sc, 1);
if (!ECORE_TEST_BIT(state, pstate)) {
#ifdef ECORE_STOP_ON_ERROR
- ECORE_MSG("exit (cnt %d)", 5000 - cnt);
+ ECORE_MSG(sc, "exit (cnt %d)", 5000 - cnt);
#endif
return ECORE_SUCCESS;
}
@@ -311,7 +311,7 @@ static int ecore_state_wait(struct bnx2x_softc *sc, int state,
}
/* timeout! */
- PMD_DRV_LOG(ERR, "timeout waiting for state %d", state);
+ PMD_DRV_LOG(ERR, sc, "timeout waiting for state %d", state);
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
@@ -372,11 +372,11 @@ static int __ecore_vlan_mac_h_write_trylock(struct bnx2x_softc *sc __rte_unused,
struct ecore_vlan_mac_obj *o)
{
if (o->head_reader) {
- ECORE_MSG("vlan_mac_lock writer - There are readers; Busy");
+ ECORE_MSG(sc, "vlan_mac_lock writer - There are readers; Busy");
return ECORE_BUSY;
}
- ECORE_MSG("vlan_mac_lock writer - Taken");
+ ECORE_MSG(sc, "vlan_mac_lock writer - Taken");
return ECORE_SUCCESS;
}
@@ -396,13 +396,13 @@ static void __ecore_vlan_mac_h_exec_pending(struct bnx2x_softc *sc,
int rc;
unsigned long ramrod_flags = o->saved_ramrod_flags;
- ECORE_MSG("vlan_mac_lock execute pending command with ramrod flags %lu",
+ ECORE_MSG(sc, "vlan_mac_lock execute pending command with ramrod flags %lu",
ramrod_flags);
o->head_exe_request = FALSE;
o->saved_ramrod_flags = 0;
rc = ecore_exe_queue_step(sc, &o->exe_queue, &ramrod_flags);
if (rc != ECORE_SUCCESS) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"execution of pending commands failed with rc %d",
rc);
#ifdef ECORE_STOP_ON_ERROR
@@ -427,7 +427,7 @@ static void __ecore_vlan_mac_h_pend(struct bnx2x_softc *sc __rte_unused,
{
o->head_exe_request = TRUE;
o->saved_ramrod_flags = ramrod_flags;
- ECORE_MSG("Placing pending execution with ramrod flags %lu",
+ ECORE_MSG(sc, "Placing pending execution with ramrod flags %lu",
ramrod_flags);
}
@@ -448,8 +448,8 @@ static void __ecore_vlan_mac_h_write_unlock(struct bnx2x_softc *sc,
* executed. If so, execute again. [Ad infinitum]
*/
while (o->head_exe_request) {
- ECORE_MSG
- ("vlan_mac_lock - writer release encountered a pending request");
+ ECORE_MSG(sc,
+ "vlan_mac_lock - writer release encountered a pending request");
__ecore_vlan_mac_h_exec_pending(sc, o);
}
}
@@ -485,7 +485,8 @@ static int __ecore_vlan_mac_h_read_lock(struct bnx2x_softc *sc __rte_unused,
{
/* If we got here, we're holding lock --> no WRITER exists */
o->head_reader++;
- ECORE_MSG("vlan_mac_lock - locked reader - number %d", o->head_reader);
+ ECORE_MSG(sc,
+ "vlan_mac_lock - locked reader - number %d", o->head_reader);
return ECORE_SUCCESS;
}
@@ -524,14 +525,14 @@ static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
struct ecore_vlan_mac_obj *o)
{
if (!o->head_reader) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Need to release vlan mac reader lock, but lock isn't taken");
#ifdef ECORE_STOP_ON_ERROR
ecore_panic();
#endif
} else {
o->head_reader--;
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"vlan_mac_lock - decreased readers to %d",
o->head_reader);
}
@@ -540,7 +541,7 @@ static void __ecore_vlan_mac_h_read_unlock(struct bnx2x_softc *sc,
* was last - if so we need to execute the command.
*/
if (!o->head_reader && o->head_exe_request) {
- PMD_DRV_LOG(INFO,
+ PMD_DRV_LOG(INFO, sc,
"vlan_mac_lock - reader release encountered a pending request");
/* Writer release will do the trick */
@@ -583,10 +584,10 @@ static int ecore_get_n_elements(struct bnx2x_softc *sc,
uint8_t *next = base;
int counter = 0, read_lock;
- ECORE_MSG("get_n_elements - taking vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "get_n_elements - taking vlan_mac_lock (reader)");
read_lock = ecore_vlan_mac_h_read_lock(sc, o);
if (read_lock != ECORE_SUCCESS)
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"get_n_elements failed to get vlan mac reader lock; Access without lock");
/* traverse list */
@@ -595,15 +596,15 @@ static int ecore_get_n_elements(struct bnx2x_softc *sc,
if (counter < n) {
ECORE_MEMCPY(next, &pos->u, size);
counter++;
- ECORE_MSG
- ("copied element number %d to address %p element was:",
+ ECORE_MSG
+ (sc, "copied element number %d to address %p element was:",
counter, next);
next += stride + size;
}
}
if (read_lock == ECORE_SUCCESS) {
- ECORE_MSG("get_n_elements - releasing vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "get_n_elements - releasing vlan_mac_lock (reader)");
ecore_vlan_mac_h_read_unlock(sc, o);
}
@@ -617,7 +618,7 @@ static int ecore_check_mac_add(struct bnx2x_softc *sc __rte_unused,
{
struct ecore_vlan_mac_registry_elem *pos;
- ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
+ ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for ADD command",
data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
@@ -646,7 +647,7 @@ static struct ecore_vlan_mac_registry_elem *ecore_check_mac_del(struct bnx2x_sof
{
struct ecore_vlan_mac_registry_elem *pos;
- ECORE_MSG("Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
+ ECORE_MSG(sc, "Checking MAC %02x:%02x:%02x:%02x:%02x:%02x for DEL command",
data->mac.mac[0], data->mac.mac[1], data->mac.mac[2],
data->mac.mac[3], data->mac.mac[4], data->mac.mac[5]);
@@ -724,7 +725,7 @@ static void ecore_set_mac_in_nig(struct bnx2x_softc *sc,
if (index > ECORE_LLH_CAM_MAX_PF_LINE)
return;
- ECORE_MSG("Going to %s LLH configuration at entry %d",
+ ECORE_MSG(sc, "Going to %s LLH configuration at entry %d",
(add ? "ADD" : "DELETE"), index);
if (add) {
@@ -840,7 +841,7 @@ static void ecore_set_one_mac_e2(struct bnx2x_softc *sc,
ecore_vlan_mac_set_cmd_hdr_e2(o, add, CLASSIFY_RULE_OPCODE_MAC,
&rule_entry->mac.header);
- ECORE_MSG("About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
+ ECORE_MSG(sc, "About to %s MAC %02x:%02x:%02x:%02x:%02x:%02x for Queue %d",
(add ? "add" : "delete"), mac[0], mac[1], mac[2], mac[3],
mac[4], mac[5], raw->cl_id);
@@ -945,7 +946,7 @@ static void ecore_vlan_mac_set_rdata_e1x(struct bnx2x_softc *sc
ecore_vlan_mac_set_cfg_entry_e1x(o, add, opcode, mac, vlan_id,
cfg_entry);
- ECORE_MSG("%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
+ ECORE_MSG(sc, "%s MAC %02x:%02x:%02x:%02x:%02x:%02x CLID %d CAM offset %d",
(add ? "setting" : "clearing"),
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
o->raw.cl_id, cam_offset);
@@ -1090,8 +1091,8 @@ static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
/* Check the registry */
rc = o->check_add(sc, o, &elem->cmd_data.vlan_mac.u);
if (rc) {
- ECORE_MSG
- ("ADD command is not allowed considering current registry state.");
+ ECORE_MSG(sc,
+ "ADD command is not allowed considering current registry state.");
return rc;
}
@@ -1099,7 +1100,7 @@ static int ecore_validate_vlan_mac_add(struct bnx2x_softc *sc,
* MAC/VLAN/VLAN-MAC. Return an error if there is.
*/
if (exeq->get(exeq, elem)) {
- ECORE_MSG("There is a pending ADD command already");
+ ECORE_MSG(sc, "There is a pending ADD command already");
return ECORE_EXISTS;
}
@@ -1138,8 +1139,8 @@ static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
*/
pos = o->check_del(sc, o, &elem->cmd_data.vlan_mac.u);
if (!pos) {
- ECORE_MSG
- ("DEL command is not allowed considering current registry state");
+ ECORE_MSG(sc,
+ "DEL command is not allowed considering current registry state");
return ECORE_EXISTS;
}
@@ -1151,13 +1152,13 @@ static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
/* Check for MOVE commands */
query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_MOVE;
if (exeq->get(exeq, &query_elem)) {
- PMD_DRV_LOG(ERR, "There is a pending MOVE command already");
+ PMD_DRV_LOG(ERR, sc, "There is a pending MOVE command already");
return ECORE_INVAL;
}
/* Check for DEL commands */
if (exeq->get(exeq, elem)) {
- ECORE_MSG("There is a pending DEL command already");
+ ECORE_MSG(sc, "There is a pending DEL command already");
return ECORE_EXISTS;
}
@@ -1165,7 +1166,7 @@ static int ecore_validate_vlan_mac_del(struct bnx2x_softc *sc,
if (!(ECORE_TEST_BIT(ECORE_DONT_CONSUME_CAM_CREDIT,
&elem->cmd_data.vlan_mac.vlan_mac_flags) ||
o->put_credit(o))) {
- PMD_DRV_LOG(ERR, "Failed to return a credit");
+ PMD_DRV_LOG(ERR, sc, "Failed to return a credit");
return ECORE_INVAL;
}
@@ -1198,8 +1199,8 @@ static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
* state.
*/
if (!src_o->check_move(sc, src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
- ECORE_MSG
- ("MOVE command is not allowed considering current registry state");
+ ECORE_MSG(sc,
+ "MOVE command is not allowed considering current registry state");
return ECORE_INVAL;
}
@@ -1212,21 +1213,21 @@ static int ecore_validate_vlan_mac_move(struct bnx2x_softc *sc,
/* Check DEL on source */
query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_DEL;
if (src_exeq->get(src_exeq, &query_elem)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"There is a pending DEL command on the source queue already");
return ECORE_INVAL;
}
/* Check MOVE on source */
if (src_exeq->get(src_exeq, elem)) {
- ECORE_MSG("There is a pending MOVE command already");
+ ECORE_MSG(sc, "There is a pending MOVE command already");
return ECORE_EXISTS;
}
/* Check ADD on destination */
query_elem.cmd_data.vlan_mac.cmd = ECORE_VLAN_MAC_ADD;
if (dest_exeq->get(dest_exeq, &query_elem)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"There is a pending ADD command on the destination queue already");
return ECORE_INVAL;
}
@@ -1331,7 +1332,7 @@ static int __ecore_vlan_mac_execute_step(struct bnx2x_softc *sc,
ECORE_SPIN_LOCK_BH(&o->exe_queue.lock);
- ECORE_MSG("vlan_mac_execute_step - trying to take writer lock");
+ ECORE_MSG(sc, "vlan_mac_execute_step - trying to take writer lock");
rc = __ecore_vlan_mac_h_write_trylock(sc, o);
if (rc != ECORE_SUCCESS) {
@@ -1428,17 +1429,17 @@ static int ecore_optimize_vlan_mac(struct bnx2x_softc *sc,
&pos->cmd_data.vlan_mac.vlan_mac_flags)) {
if ((query.cmd_data.vlan_mac.cmd ==
ECORE_VLAN_MAC_ADD) && !o->put_credit(o)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to return the credit for the optimized ADD command");
return ECORE_INVAL;
} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to recover the credit from the optimized DEL command");
return ECORE_INVAL;
}
}
- ECORE_MSG("Optimizing %s command",
+ ECORE_MSG(sc, "Optimizing %s command",
(elem->cmd_data.vlan_mac.cmd == ECORE_VLAN_MAC_ADD) ?
"ADD" : "DEL");
@@ -1488,7 +1489,7 @@ static int ecore_vlan_mac_get_registry_elem(struct bnx2x_softc *sc,
return ECORE_INVAL;
}
- ECORE_MSG("Got cam offset %d", reg_elem->cam_offset);
+ ECORE_MSG(sc, "Got cam offset %d", reg_elem->cam_offset);
/* Set a VLAN-MAC data */
ECORE_MEMCPY(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
@@ -1697,8 +1698,8 @@ int ecore_config_vlan_mac(struct bnx2x_softc *sc,
rc = ECORE_PENDING;
if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- ECORE_MSG
- ("RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
+ ECORE_MSG(sc,
+ "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.");
raw->clear_pending(raw);
}
@@ -1777,7 +1778,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
*vlan_mac_flags) {
rc = exeq->remove(sc, exeq->owner, exeq_pos);
if (rc) {
- PMD_DRV_LOG(ERR, "Failed to remove command");
+ PMD_DRV_LOG(ERR, sc, "Failed to remove command");
ECORE_SPIN_UNLOCK_BH(&exeq->lock);
return rc;
}
@@ -1802,7 +1803,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
ECORE_CLEAR_BIT_NA(RAMROD_EXEC, &p.ramrod_flags);
ECORE_CLEAR_BIT_NA(RAMROD_CONT, &p.ramrod_flags);
- ECORE_MSG("vlan_mac_del_all -- taking vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "vlan_mac_del_all -- taking vlan_mac_lock (reader)");
read_lock = ecore_vlan_mac_h_read_lock(sc, o);
if (read_lock != ECORE_SUCCESS)
return read_lock;
@@ -1814,7 +1815,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
ECORE_MEMCPY(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = ecore_config_vlan_mac(sc, &p);
if (rc < 0) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Failed to add a new DEL command");
ecore_vlan_mac_h_read_unlock(sc, o);
return rc;
@@ -1822,7 +1823,7 @@ static int ecore_vlan_mac_del_all(struct bnx2x_softc *sc,
}
}
- ECORE_MSG("vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
+ ECORE_MSG(sc, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)");
ecore_vlan_mac_h_read_unlock(sc, o);
p.ramrod_flags = *ramrod_flags;
@@ -2009,7 +2010,7 @@ static int ecore_set_rx_mode_e1x(struct bnx2x_softc *sc,
mac_filters->unmatched_unicast | mask :
mac_filters->unmatched_unicast & ~mask;
- ECORE_MSG("drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
+ ECORE_MSG(sc, "drop_ucast 0x%xdrop_mcast 0x%x accp_ucast 0x%x"
"accp_mcast 0x%xaccp_bcast 0x%x",
mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
@@ -2155,8 +2156,8 @@ static int ecore_set_rx_mode_e2(struct bnx2x_softc *sc,
*/
ecore_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
- ECORE_MSG
- ("About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
+ ECORE_MSG
+ (sc, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx",
data->header.rule_cnt, p->rx_accept_flags, p->tx_accept_flags);
/* No need for an explicit memory barrier here as long we would
@@ -2209,7 +2210,7 @@ int ecore_config_rx_mode(struct bnx2x_softc *sc,
return rc;
}
} else {
- ECORE_MSG("ERROR: config_rx_mode is NULL");
+ ECORE_MSG(sc, "ERROR: config_rx_mode is NULL");
return -1;
}
@@ -2290,7 +2291,7 @@ static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
if (!new_cmd)
return ECORE_NOMEM;
- ECORE_MSG("About to enqueue a new %d command. macs_list_len=%d",
+ ECORE_MSG(sc, "About to enqueue a new %d command. macs_list_len=%d",
cmd, macs_list_len);
ECORE_LIST_INIT(&new_cmd->data.macs_head);
@@ -2326,7 +2327,7 @@ static int ecore_mcast_enqueue_cmd(struct bnx2x_softc *sc __rte_unused,
default:
ECORE_FREE(sc, new_cmd, total_sz);
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -2438,11 +2439,11 @@ static void ecore_mcast_set_one_rule_e2(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return;
}
- ECORE_MSG("%s bin %d",
+ ECORE_MSG(sc, "%s bin %d",
((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
"Setting" : "Clearing"), bin);
@@ -2477,7 +2478,7 @@ static int ecore_mcast_handle_restore_cmd_e2(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG("About to configure a bin %d", cur_bin);
+ ECORE_MSG(sc, "About to configure a bin %d", cur_bin);
/* Break if we reached the maximum number
* of rules.
@@ -2509,8 +2510,8 @@ static void ecore_mcast_hdl_pending_add_e2(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG
- ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
+ ECORE_MSG
+ (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
pmac_pos->mac[0], pmac_pos->mac[1], pmac_pos->mac[2],
pmac_pos->mac[3], pmac_pos->mac[4], pmac_pos->mac[5]);
@@ -2545,7 +2546,7 @@ static void ecore_mcast_hdl_pending_del_e2(struct bnx2x_softc *sc,
cmd_pos->data.macs_num--;
- ECORE_MSG("Deleting MAC. %d left,cnt is %d",
+ ECORE_MSG(sc, "Deleting MAC. %d left,cnt is %d",
cmd_pos->data.macs_num, cnt);
/* Break if we reached the maximum
@@ -2604,7 +2605,8 @@ static int ecore_mcast_handle_pending_cmds_e2(struct bnx2x_softc *sc, struct
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd_pos->type);
+ PMD_DRV_LOG(ERR, sc,
+ "Unknown command: %d", cmd_pos->type);
return ECORE_INVAL;
}
@@ -2641,8 +2643,8 @@ static void ecore_mcast_hdl_add(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG
- ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
+ ECORE_MSG
+ (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC",
mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5]);
}
@@ -2662,7 +2664,8 @@ static void ecore_mcast_hdl_del(struct bnx2x_softc *sc,
cnt++;
- ECORE_MSG("Deleting MAC. %d left", p->mcast_list_len - i - 1);
+ ECORE_MSG(sc,
+ "Deleting MAC. %d left", p->mcast_list_len - i - 1);
}
*line_idx = cnt;
@@ -2688,7 +2691,7 @@ static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
struct ecore_mcast_obj *o = p->mcast_obj;
int cnt = start_cnt;
- ECORE_MSG("p->mcast_list_len=%d", p->mcast_list_len);
+ ECORE_MSG(sc, "p->mcast_list_len=%d", p->mcast_list_len);
switch (cmd) {
case ECORE_MCAST_CMD_ADD:
@@ -2704,7 +2707,7 @@ static int ecore_mcast_handle_current_cmd(struct bnx2x_softc *sc, struct
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -2749,7 +2752,7 @@ static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -2935,8 +2938,8 @@ static void ecore_mcast_hdl_add_e1h(struct bnx2x_softc *sc __rte_unused,
bit = ecore_mcast_bin_from_mac(mlist_pos->mac);
ECORE_57711_SET_MC_FILTER(mc_filter, bit);
- ECORE_MSG
- ("About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
+ ECORE_MSG
+ (sc, "About to configure %02x:%02x:%02x:%02x:%02x:%02x mcast MAC, bin %d",
mlist_pos->mac[0], mlist_pos->mac[1], mlist_pos->mac[2],
mlist_pos->mac[3], mlist_pos->mac[4], mlist_pos->mac[5],
bit);
@@ -2956,7 +2959,7 @@ static void ecore_mcast_hdl_restore_e1h(struct bnx2x_softc *sc
for (bit = ecore_mcast_get_next_bin(o, 0);
bit >= 0; bit = ecore_mcast_get_next_bin(o, bit + 1)) {
ECORE_57711_SET_MC_FILTER(mc_filter, bit);
- ECORE_MSG("About to set bin %d", bit);
+ ECORE_MSG(sc, "About to set bin %d", bit);
}
}
@@ -2987,7 +2990,7 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
break;
case ECORE_MCAST_CMD_DEL:
- ECORE_MSG("Invalidating multicast MACs configuration");
+ ECORE_MSG(sc, "Invalidating multicast MACs configuration");
/* clear the registry */
ECORE_MEMSET(o->registry.aprox_match.vec, 0,
@@ -2999,7 +3002,7 @@ static int ecore_mcast_setup_e1h(struct bnx2x_softc *sc,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", cmd);
return ECORE_INVAL;
}
@@ -3050,8 +3053,8 @@ int ecore_config_mcast(struct bnx2x_softc *sc,
if ((!p->mcast_list_len) && (!o->check_sched(o)))
return ECORE_SUCCESS;
- ECORE_MSG
- ("o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
+ ECORE_MSG
+ (sc, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d",
o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
/* Enqueue the current command to the pending list if we can't complete
@@ -3480,7 +3483,7 @@ static int ecore_setup_rss(struct bnx2x_softc *sc,
ECORE_MEMSET(data, 0, sizeof(*data));
- ECORE_MSG("Configuring RSS");
+ ECORE_MSG(sc, "Configuring RSS");
/* Set an echo field */
data->echo = ECORE_CPU_TO_LE32((r->cid & ECORE_SWCID_MASK) |
@@ -3494,7 +3497,7 @@ static int ecore_setup_rss(struct bnx2x_softc *sc,
data->rss_mode = rss_mode;
- ECORE_MSG("rss_mode=%d", rss_mode);
+ ECORE_MSG(sc, "rss_mode=%d", rss_mode);
/* RSS capabilities */
if (ECORE_TEST_BIT(ECORE_RSS_IPV4, &p->rss_flags))
@@ -3534,7 +3537,7 @@ static int ecore_setup_rss(struct bnx2x_softc *sc,
/* RSS engine ID */
data->rss_engine_id = o->engine_id;
- ECORE_MSG("rss_engine_id=%d", data->rss_engine_id);
+ ECORE_MSG(sc, "rss_engine_id=%d", data->rss_engine_id);
/* Indirection table */
ECORE_MEMCPY(data->indirection_table, p->ind_table,
@@ -3629,15 +3632,15 @@ int ecore_queue_state_change(struct bnx2x_softc *sc,
/* Check that the requested transition is legal */
rc = o->check_transition(sc, o, params);
if (rc) {
- PMD_DRV_LOG(ERR, "check transition returned an error. rc %d",
+ PMD_DRV_LOG(ERR, sc, "check transition returned an error. rc %d",
rc);
return ECORE_INVAL;
}
/* Set "pending" bit */
- ECORE_MSG("pending bit was=%lx", o->pending);
+ ECORE_MSG(sc, "pending bit was=%lx", o->pending);
pending_bit = o->set_pending(o, params);
- ECORE_MSG("pending bit now=%lx", o->pending);
+ ECORE_MSG(sc, "pending bit now=%lx", o->pending);
/* Don't send a command if only driver cleanup was requested */
if (ECORE_TEST_BIT(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
@@ -3704,7 +3707,7 @@ static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
unsigned long cur_pending = o->pending;
if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d",
cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->state,
cur_pending, o->next_state);
@@ -3715,15 +3718,15 @@ static int ecore_queue_comp_cmd(struct bnx2x_softc *sc __rte_unused,
/* >= because tx only must always be smaller than cos since the
* primary connection supports COS 0
*/
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
- ECORE_MSG("Completing command %d for queue %d, setting state to %d",
+ ECORE_MSG(sc, "Completing command %d for queue %d, setting state to %d",
cmd, o->cids[ECORE_PRIMARY_CID_INDEX], o->next_state);
if (o->next_tx_only) /* print num tx-only if any exist */
- ECORE_MSG("primary cid %d: num tx-only cons %d",
+ ECORE_MSG(sc, "primary cid %d: num tx-only cons %d",
o->cids[ECORE_PRIMARY_CID_INDEX], o->next_tx_only);
o->state = o->next_state;
@@ -3784,7 +3787,7 @@ static void ecore_q_fill_init_general_data(struct bnx2x_softc *sc __rte_unused,
ECORE_TEST_BIT(ECORE_Q_FLG_FCOE, flags) ?
LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
- ECORE_MSG("flags: active %d, cos %d, stats en %d",
+ ECORE_MSG(sc, "flags: active %d, cos %d, stats en %d",
gen_data->activate_flg, gen_data->cos,
gen_data->statistics_en_flg);
}
@@ -3925,7 +3928,7 @@ static void ecore_q_fill_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queu
ecore_q_fill_init_tx_data(&cmd_params->params.tx_only.txq_params,
&data->tx, &cmd_params->params.tx_only.flags);
- ECORE_MSG("cid %d, tx bd page lo %x hi %x",
+ ECORE_MSG(sc, "cid %d, tx bd page lo %x hi %x",
cmd_params->q_obj->cids[0],
data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
}
@@ -3975,9 +3978,9 @@ static int ecore_q_init(struct bnx2x_softc *sc,
/* Set CDU context validation values */
for (cos = 0; cos < o->max_cos; cos++) {
- ECORE_MSG("setting context validation. cid %d, cos %d",
+ ECORE_MSG(sc, "setting context validation. cid %d, cos %d",
o->cids[cos], cos);
- ECORE_MSG("context pointer %p", init->cxts[cos]);
+ ECORE_MSG(sc, "context pointer %p", init->cxts[cos]);
ECORE_SET_CTX_VALIDATION(sc, init->cxts[cos], o->cids[cos]);
}
@@ -4061,15 +4064,15 @@ static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue
if (ECORE_TEST_BIT(ECORE_Q_TYPE_FWD, &o->type))
ramrod = RAMROD_CMD_ID_ETH_FORWARD_SETUP;
- ECORE_MSG("sending forward tx-only ramrod");
+ ECORE_MSG(sc, "sending forward tx-only ramrod");
if (cid_index >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_index);
return ECORE_INVAL;
}
- ECORE_MSG("parameters received: cos: %d sp-id: %d",
+ ECORE_MSG(sc, "parameters received: cos: %d sp-id: %d",
tx_only_params->gen_params.cos,
tx_only_params->gen_params.spcl_id);
@@ -4079,8 +4082,8 @@ static int ecore_q_send_setup_tx_only(struct bnx2x_softc *sc, struct ecore_queue
/* Fill the ramrod data */
ecore_q_fill_setup_tx_only(sc, params, rdata);
- ECORE_MSG
- ("sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
+ ECORE_MSG
+ (sc, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d",
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
@@ -4175,7 +4178,7 @@ static int ecore_q_send_update(struct bnx2x_softc *sc,
uint8_t cid_index = update_params->cid_index;
if (cid_index >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_index);
return ECORE_INVAL;
}
@@ -4269,7 +4272,7 @@ static int ecore_q_send_cfc_del(struct bnx2x_softc *sc,
uint8_t cid_idx = params->params.cfc_del.cid_index;
if (cid_idx >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_idx);
return ECORE_INVAL;
}
@@ -4285,7 +4288,7 @@ static int ecore_q_send_terminate(struct bnx2x_softc *sc, struct ecore_queue_sta
uint8_t cid_index = params->params.terminate.cid_index;
if (cid_index >= o->max_cos) {
- PMD_DRV_LOG(ERR, "queue[%d]: cid_index (%d) is out of range",
+ PMD_DRV_LOG(ERR, sc, "queue[%d]: cid_index (%d) is out of range",
o->cl_id, cid_index);
return ECORE_INVAL;
}
@@ -4329,7 +4332,7 @@ static int ecore_queue_send_cmd_cmn(struct bnx2x_softc *sc, struct ecore_queue_s
case ECORE_Q_CMD_EMPTY:
return ecore_q_send_empty(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -4352,7 +4355,7 @@ static int ecore_queue_send_cmd_e1x(struct bnx2x_softc *sc,
case ECORE_Q_CMD_EMPTY:
return ecore_queue_send_cmd_cmn(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -4375,7 +4378,7 @@ static int ecore_queue_send_cmd_e2(struct bnx2x_softc *sc,
case ECORE_Q_CMD_EMPTY:
return ecore_queue_send_cmd_cmn(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -4418,7 +4421,7 @@ static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
* the previous one.
*/
if (o->pending) {
- PMD_DRV_LOG(ERR, "Blocking transition since pending was %lx",
+ PMD_DRV_LOG(ERR, sc, "Blocking transition since pending was %lx",
o->pending);
return ECORE_BUSY;
}
@@ -4545,19 +4548,19 @@ static int ecore_queue_chk_transition(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Illegal state: %d", state);
+ PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
}
/* Transition is assured */
if (next_state != ECORE_Q_STATE_MAX) {
- ECORE_MSG("Good state transition: %d(%d)->%d",
+ ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
state, cmd, next_state);
o->next_state = next_state;
o->next_tx_only = next_tx_only;
return ECORE_SUCCESS;
}
- ECORE_MSG("Bad state transition request: %d %d", state, cmd);
+ ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
return ECORE_INVAL;
}
@@ -4608,18 +4611,18 @@ static int ecore_queue_chk_fwd_transition(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Illegal state: %d", state);
+ PMD_DRV_LOG(ERR, sc, "Illegal state: %d", state);
}
/* Transition is assured */
if (next_state != ECORE_Q_STATE_MAX) {
- ECORE_MSG("Good state transition: %d(%d)->%d",
+ ECORE_MSG(sc, "Good state transition: %d(%d)->%d",
state, cmd, next_state);
o->next_state = next_state;
return ECORE_SUCCESS;
}
- ECORE_MSG("Bad state transition request: %d %d", state, cmd);
+ ECORE_MSG(sc, "Bad state transition request: %d %d", state, cmd);
return ECORE_INVAL;
}
@@ -4699,14 +4702,14 @@ ecore_func_state_change_comp(struct bnx2x_softc *sc __rte_unused,
unsigned long cur_pending = o->pending;
if (!ECORE_TEST_AND_CLEAR_BIT(cmd, &cur_pending)) {
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d",
cmd, ECORE_FUNC_ID(sc), o->state, cur_pending,
o->next_state);
return ECORE_INVAL;
}
- ECORE_MSG("Completing command %d for func %d, setting state to %d",
+ ECORE_MSG(sc, "Completing command %d for func %d, setting state to %d",
cmd, ECORE_FUNC_ID(sc), o->next_state);
o->state = o->next_state;
@@ -4829,18 +4832,19 @@ static int ecore_func_chk_transition(struct bnx2x_softc *sc __rte_unused,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown state: %d", state);
+ PMD_DRV_LOG(ERR, sc, "Unknown state: %d", state);
}
/* Transition is assured */
if (next_state != ECORE_F_STATE_MAX) {
- ECORE_MSG("Good function state transition: %d(%d)->%d",
+ ECORE_MSG(sc, "Good function state transition: %d(%d)->%d",
state, cmd, next_state);
o->next_state = next_state;
return ECORE_SUCCESS;
}
- ECORE_MSG("Bad function state transition request: %d %d", state, cmd);
+ ECORE_MSG(sc,
+ "Bad function state transition request: %d %d", state, cmd);
return ECORE_INVAL;
}
@@ -4930,13 +4934,13 @@ static int ecore_func_hw_init(struct bnx2x_softc *sc,
const struct ecore_func_sp_drv_ops *drv = o->drv;
int rc = 0;
- ECORE_MSG("function %d load_code %x",
+ ECORE_MSG(sc, "function %d load_code %x",
ECORE_ABS_FUNC_ID(sc), load_code);
/* Prepare FW */
rc = drv->init_fw(sc);
if (rc) {
- PMD_DRV_LOG(ERR, "Error loading firmware");
+ PMD_DRV_LOG(ERR, sc, "Error loading firmware");
goto init_err;
}
@@ -4967,7 +4971,7 @@ static int ecore_func_hw_init(struct bnx2x_softc *sc,
break;
default:
- PMD_DRV_LOG(ERR, "Unknown load_code (0x%x) from MCP",
+ PMD_DRV_LOG(ERR, sc, "Unknown load_code (0x%x) from MCP",
load_code);
rc = ECORE_INVAL;
}
@@ -5043,7 +5047,7 @@ static int ecore_func_hw_reset(struct bnx2x_softc *sc,
struct ecore_func_sp_obj *o = params->f_obj;
const struct ecore_func_sp_drv_ops *drv = o->drv;
- ECORE_MSG("function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
+ ECORE_MSG(sc, "function %d reset_phase %x", ECORE_ABS_FUNC_ID(sc),
reset_phase);
switch (reset_phase) {
@@ -5057,7 +5061,7 @@ static int ecore_func_hw_reset(struct bnx2x_softc *sc,
ecore_func_reset_func(sc, drv);
break;
default:
- PMD_DRV_LOG(ERR, "Unknown reset_phase (0x%x) from MCP",
+ PMD_DRV_LOG(ERR, sc, "Unknown reset_phase (0x%x) from MCP",
reset_phase);
break;
}
@@ -5148,7 +5152,7 @@ static int ecore_func_send_afex_update(struct bnx2x_softc *sc, struct ecore_func
* read and we will have to put a full memory barrier there
* (inside ecore_sp_post()).
*/
- ECORE_MSG("afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
+ ECORE_MSG(sc, "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x",
rdata->vif_id,
rdata->afex_default_vlan, rdata->allowed_priorities);
@@ -5186,8 +5190,8 @@ inline int ecore_func_send_afex_viflists(struct bnx2x_softc *sc,
* (inside ecore_sp_post()).
*/
- ECORE_MSG
- ("afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
+ ECORE_MSG
+ (sc, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x",
rdata->afex_vif_list_command, rdata->vif_list_index,
rdata->func_bit_map, rdata->func_to_clear);
@@ -5258,7 +5262,7 @@ static int ecore_func_send_cmd(struct bnx2x_softc *sc,
case ECORE_F_CMD_SWITCH_UPDATE:
return ecore_func_send_switch_update(sc, params);
default:
- PMD_DRV_LOG(ERR, "Unknown command: %d", params->cmd);
+ PMD_DRV_LOG(ERR, sc, "Unknown command: %d", params->cmd);
return ECORE_INVAL;
}
}
@@ -5319,7 +5323,7 @@ int ecore_func_state_change(struct bnx2x_softc *sc,
}
if (rc == ECORE_BUSY) {
ECORE_MUTEX_UNLOCK(&o->one_pending_mutex);
- PMD_DRV_LOG(ERR,
+ PMD_DRV_LOG(ERR, sc,
"timeout waiting for previous ramrod completion");
return rc;
}
diff --git a/drivers/net/bnx2x/ecore_sp.h b/drivers/net/bnx2x/ecore_sp.h
index ff40413c..7e52245a 100644
--- a/drivers/net/bnx2x/ecore_sp.h
+++ b/drivers/net/bnx2x/ecore_sp.h
@@ -217,8 +217,8 @@ ECORE_CRC32_LE(uint32_t seed, uint8_t *mac, uint32_t len)
} while (0)
-#define ECORE_MSG(m, ...) \
- PMD_DRV_LOG(DEBUG, m, ##__VA_ARGS__)
+#define ECORE_MSG(sc, m, ...) \
+ PMD_DRV_LOG(DEBUG, sc, m, ##__VA_ARGS__)
typedef struct _ecore_list_entry_t
{
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 74e1bead..0ba25494 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -944,7 +944,7 @@ static int elink_check_lfa(struct elink_params *params)
* to verify DCC bit is cleared in any case!
*/
if (additional_config & NO_LFA_DUE_TO_DCC_MASK) {
- PMD_DRV_LOG(DEBUG, "No LFA due to DCC flap after clp exit");
+ PMD_DRV_LOG(DEBUG, sc, "No LFA due to DCC flap after clp exit");
REG_WR(sc, params->lfa_base +
offsetof(struct shmem_lfa, additional_config),
additional_config & ~NO_LFA_DUE_TO_DCC_MASK);
@@ -985,7 +985,7 @@ static int elink_check_lfa(struct elink_params *params)
offsetof(struct shmem_lfa, req_duplex));
req_val = params->req_duplex[0] | (params->req_duplex[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
- PMD_DRV_LOG(INFO, "Duplex mismatch %x vs. %x",
+ PMD_DRV_LOG(INFO, sc, "Duplex mismatch %x vs. %x",
(saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_DUPLEX_MISMATCH;
}
@@ -994,7 +994,7 @@ static int elink_check_lfa(struct elink_params *params)
offsetof(struct shmem_lfa, req_flow_ctrl));
req_val = params->req_flow_ctrl[0] | (params->req_flow_ctrl[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
- PMD_DRV_LOG(DEBUG, "Flow control mismatch %x vs. %x",
+ PMD_DRV_LOG(DEBUG, sc, "Flow control mismatch %x vs. %x",
(saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_FLOW_CTRL_MISMATCH;
}
@@ -1003,7 +1003,7 @@ static int elink_check_lfa(struct elink_params *params)
offsetof(struct shmem_lfa, req_line_speed));
req_val = params->req_line_speed[0] | (params->req_line_speed[1] << 16);
if ((saved_val & lfa_mask) != (req_val & lfa_mask)) {
- PMD_DRV_LOG(DEBUG, "Link speed mismatch %x vs. %x",
+ PMD_DRV_LOG(DEBUG, sc, "Link speed mismatch %x vs. %x",
(saved_val & lfa_mask), (req_val & lfa_mask));
return LFA_LINK_SPEED_MISMATCH;
}
@@ -1014,7 +1014,7 @@ static int elink_check_lfa(struct elink_params *params)
speed_cap_mask[cfg_idx]));
if (cur_speed_cap_mask != params->speed_cap_mask[cfg_idx]) {
- PMD_DRV_LOG(DEBUG, "Speed Cap mismatch %x vs. %x",
+ PMD_DRV_LOG(DEBUG, sc, "Speed Cap mismatch %x vs. %x",
cur_speed_cap_mask,
params->speed_cap_mask[cfg_idx]);
return LFA_SPEED_CAP_MISMATCH;
@@ -1027,7 +1027,7 @@ static int elink_check_lfa(struct elink_params *params)
REQ_FC_AUTO_ADV_MASK;
if ((uint16_t) cur_req_fc_auto_adv != params->req_fc_auto_adv) {
- PMD_DRV_LOG(DEBUG, "Flow Ctrl AN mismatch %x vs. %x",
+ PMD_DRV_LOG(DEBUG, sc, "Flow Ctrl AN mismatch %x vs. %x",
cur_req_fc_auto_adv, params->req_fc_auto_adv);
return LFA_FLOW_CTRL_MISMATCH;
}
@@ -1040,7 +1040,8 @@ static int elink_check_lfa(struct elink_params *params)
(params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI)) ||
((eee_status & SHMEM_EEE_REQUESTED_BIT) ^
(params->eee_mode & ELINK_EEE_MODE_ADV_LPI))) {
- PMD_DRV_LOG(DEBUG, "EEE mismatch %x vs. %x", params->eee_mode,
+ PMD_DRV_LOG(DEBUG, sc,
+ "EEE mismatch %x vs. %x", params->eee_mode,
eee_status);
return LFA_EEE_MISMATCH;
}
@@ -1059,7 +1060,7 @@ static void elink_get_epio(struct bnx2x_softc *sc, uint32_t epio_pin,
*en = 0;
/* Sanity check */
if (epio_pin > 31) {
- PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to get", epio_pin);
+ PMD_DRV_LOG(DEBUG, sc, "Invalid EPIO pin %d to get", epio_pin);
return;
}
@@ -1077,10 +1078,10 @@ static void elink_set_epio(struct bnx2x_softc *sc, uint32_t epio_pin, uint32_t e
/* Sanity check */
if (epio_pin > 31) {
- PMD_DRV_LOG(DEBUG, "Invalid EPIO pin %d to set", epio_pin);
+ PMD_DRV_LOG(DEBUG, sc, "Invalid EPIO pin %d to set", epio_pin);
return;
}
- PMD_DRV_LOG(DEBUG, "Setting EPIO pin %d to %d", epio_pin, en);
+ PMD_DRV_LOG(DEBUG, sc, "Setting EPIO pin %d to %d", epio_pin, en);
epio_mask = 1 << epio_pin;
/* Set this EPIO to output */
gp_output = REG_RD(sc, MCP_REG_MCPR_GP_OUTPUTS);
@@ -1211,7 +1212,7 @@ static void elink_set_mdio_clk(struct bnx2x_softc *sc, uint32_t emac_base)
new_mode |= clc_cnt;
new_mode |= (EMAC_MDIO_MODE_CLAUSE_45);
- PMD_DRV_LOG(DEBUG, "Changing emac_mode from 0x%x to 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Changing emac_mode from 0x%x to 0x%x",
cur_mode, new_mode);
REG_WR(sc, emac_base + EMAC_REG_EMAC_MDIO_MODE, new_mode);
DELAY(40);
@@ -1264,9 +1265,9 @@ static void elink_emac_init(struct elink_params *params)
timeout = 200;
do {
val = REG_RD(sc, emac_base + EMAC_REG_EMAC_MODE);
- PMD_DRV_LOG(DEBUG, "EMAC reset reg is %u", val);
+ PMD_DRV_LOG(DEBUG, sc, "EMAC reset reg is %u", val);
if (!timeout) {
- PMD_DRV_LOG(DEBUG, "EMAC timeout!");
+ PMD_DRV_LOG(DEBUG, sc, "EMAC timeout!");
return;
}
timeout--;
@@ -1329,7 +1330,7 @@ static void elink_umac_enable(struct elink_params *params,
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- PMD_DRV_LOG(DEBUG, "enabling UMAC");
+ PMD_DRV_LOG(DEBUG, sc, "enabling UMAC");
/* This register opens the gate for the UMAC despite its name */
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + params->port * 4, 1);
@@ -1352,7 +1353,7 @@ static void elink_umac_enable(struct elink_params *params,
val |= (3 << 2);
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid speed for UMAC %d",
+ PMD_DRV_LOG(DEBUG, sc, "Invalid speed for UMAC %d",
vars->line_speed);
break;
}
@@ -1370,7 +1371,7 @@ static void elink_umac_enable(struct elink_params *params,
/* Configure UMAC for EEE */
if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
- PMD_DRV_LOG(DEBUG, "configured UMAC for EEE");
+ PMD_DRV_LOG(DEBUG, sc, "configured UMAC for EEE");
REG_WR(sc, umac_base + UMAC_REG_UMAC_EEE_CTRL,
UMAC_UMAC_EEE_CTRL_REG_EEE_EN);
REG_WR(sc, umac_base + UMAC_REG_EEE_WAKE_TIMER, 0x11);
@@ -1428,7 +1429,7 @@ static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
is_port4mode &&
(REG_RD(sc, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
- PMD_DRV_LOG(DEBUG, "XMAC already out of reset in 4-port mode");
+ PMD_DRV_LOG(DEBUG, sc, "XMAC already out of reset in 4-port mode");
return;
}
@@ -1440,7 +1441,7 @@ static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
if (is_port4mode) {
- PMD_DRV_LOG(DEBUG, "Init XMAC to 2 ports x 10G per path");
+ PMD_DRV_LOG(DEBUG, sc, "Init XMAC to 2 ports x 10G per path");
/* Set the number of ports on the system side to up to 2 */
REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 1);
@@ -1451,12 +1452,12 @@ static void elink_xmac_init(struct elink_params *params, uint32_t max_speed)
/* Set the number of ports on the system side to 1 */
REG_WR(sc, MISC_REG_XMAC_CORE_PORT_MODE, 0);
if (max_speed == ELINK_SPEED_10000) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Init XMAC to 10G x 1 port per path");
/* Set the number of ports on the Warp Core to 10G */
REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 3);
} else {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Init XMAC to 20G x 2 ports per path");
/* Set the number of ports on the Warp Core to 20G */
REG_WR(sc, MISC_REG_XMAC_PHY_PORT_MODE, 1);
@@ -1489,7 +1490,7 @@ static void elink_set_xmac_rxtx(struct elink_params *params, uint8_t en)
(pfc_ctrl & ~(1 << 1)));
REG_WR(sc, xmac_base + XMAC_REG_PFC_CTRL_HI,
(pfc_ctrl | (1 << 1)));
- PMD_DRV_LOG(DEBUG, "Disable XMAC on port %x", port);
+ PMD_DRV_LOG(DEBUG, sc, "Disable XMAC on port %x", port);
val = REG_RD(sc, xmac_base + XMAC_REG_CTRL);
if (en)
val |= (XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN);
@@ -1504,7 +1505,7 @@ static elink_status_t elink_xmac_enable(struct elink_params *params,
{
uint32_t val, xmac_base;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "enabling XMAC");
+ PMD_DRV_LOG(DEBUG, sc, "enabling XMAC");
xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
@@ -1541,7 +1542,7 @@ static elink_status_t elink_xmac_enable(struct elink_params *params,
elink_update_pfc_xmac(params, vars);
if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
- PMD_DRV_LOG(DEBUG, "Setting XMAC for EEE");
+ PMD_DRV_LOG(DEBUG, sc, "Setting XMAC for EEE");
REG_WR(sc, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
REG_WR(sc, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
} else {
@@ -1577,7 +1578,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "enabling EMAC");
+ PMD_DRV_LOG(DEBUG, sc, "enabling EMAC");
/* Disable BMAC */
REG_WR(sc, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
@@ -1591,14 +1592,14 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- PMD_DRV_LOG(DEBUG, "XGXS");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS");
/* select the master lanes (out of 0-3) */
REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, ser_lane);
/* select XGXS */
REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 1);
} else { /* SerDes */
- PMD_DRV_LOG(DEBUG, "SerDes");
+ PMD_DRV_LOG(DEBUG, sc, "SerDes");
/* select SerDes */
REG_WR(sc, NIG_REG_XGXS_SERDES0_MODE_SEL + port * 4, 0);
}
@@ -1644,7 +1645,7 @@ static elink_status_t elink_emac_enable(struct elink_params *params,
*/
elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE, 0);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) {
- PMD_DRV_LOG(DEBUG, "PFC is enabled");
+ PMD_DRV_LOG(DEBUG, sc, "PFC is enabled");
/* Enable PFC again */
elink_cb_reg_write(sc, emac_base + EMAC_REG_RX_PFC_MODE,
EMAC_REG_RX_PFC_MODE_RX_EN |
@@ -1764,7 +1765,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
REG_WR_DMAE(sc, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED) {
- PMD_DRV_LOG(DEBUG, "PFC is enabled");
+ PMD_DRV_LOG(DEBUG, sc, "PFC is enabled");
/* Enable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x0;
wb_data[0] |= (1 << 0); /* RX */
@@ -1778,7 +1779,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
/* Clear the force Xon */
wb_data[0] &= ~(1 << 2);
} else {
- PMD_DRV_LOG(DEBUG, "PFC is disabled");
+ PMD_DRV_LOG(DEBUG, sc, "PFC is disabled");
/* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
@@ -1804,7 +1805,7 @@ static void elink_update_pfc_bmac2(struct elink_params *params,
val = 0x3; /* Enable RX and TX */
if (is_lb) {
val |= 0x4; /* Local loopback */
- PMD_DRV_LOG(DEBUG, "enable bmac loopback");
+ PMD_DRV_LOG(DEBUG, sc, "enable bmac loopback");
}
/* When PFC enabled, Pass pause frames towards the NIG. */
if (params->feature_config_flags & ELINK_FEATURE_CONFIG_PFC_ENABLED)
@@ -1898,7 +1899,7 @@ static void elink_update_pfc_nig(struct elink_params *params,
int set_pfc = params->feature_config_flags &
ELINK_FEATURE_CONFIG_PFC_ENABLED;
- PMD_DRV_LOG(DEBUG, "updating pfc nig parameters");
+ PMD_DRV_LOG(DEBUG, sc, "updating pfc nig parameters");
/* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
* MAC control frames (that are not pause packets)
@@ -2010,7 +2011,7 @@ elink_status_t elink_update_pfc(struct elink_params *params,
if (!vars->link_up)
return elink_status;
- PMD_DRV_LOG(DEBUG, "About to update PFC in BMAC");
+ PMD_DRV_LOG(DEBUG, sc, "About to update PFC in BMAC");
if (CHIP_IS_E3(sc)) {
if (vars->mac_type == ELINK_MAC_TYPE_XMAC)
@@ -2020,7 +2021,7 @@ elink_status_t elink_update_pfc(struct elink_params *params,
if ((val &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
== 0) {
- PMD_DRV_LOG(DEBUG, "About to update PFC in EMAC");
+ PMD_DRV_LOG(DEBUG, sc, "About to update PFC in EMAC");
elink_emac_enable(params, vars, 0);
return elink_status;
}
@@ -2049,7 +2050,7 @@ static elink_status_t elink_bmac1_enable(struct elink_params *params,
uint32_t wb_data[2];
uint32_t val;
- PMD_DRV_LOG(DEBUG, "Enabling BigMAC1");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling BigMAC1");
/* XGXS control */
wb_data[0] = 0x3c;
@@ -2068,7 +2069,7 @@ static elink_status_t elink_bmac1_enable(struct elink_params *params,
val = 0x3;
if (is_lb) {
val |= 0x4;
- PMD_DRV_LOG(DEBUG, "enable bmac loopback");
+ PMD_DRV_LOG(DEBUG, sc, "enable bmac loopback");
}
wb_data[0] = val;
wb_data[1] = 0;
@@ -2109,7 +2110,7 @@ static elink_status_t elink_bmac2_enable(struct elink_params *params,
NIG_REG_INGRESS_BMAC0_MEM;
uint32_t wb_data[2];
- PMD_DRV_LOG(DEBUG, "Enabling BigMAC2");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling BigMAC2");
wb_data[0] = 0;
wb_data[1] = 0;
@@ -2247,7 +2248,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
/* Wait for init credit */
init_crd = REG_RD(sc, PBF_REG_P0_INIT_CRD + port * 4);
crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
- PMD_DRV_LOG(DEBUG, "init_crd 0x%x crd 0x%x", init_crd, crd);
+ PMD_DRV_LOG(DEBUG, sc, "init_crd 0x%x crd 0x%x", init_crd, crd);
while ((init_crd != crd) && count) {
DELAY(1000 * 5);
@@ -2256,7 +2257,7 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
}
crd = REG_RD(sc, PBF_REG_P0_CREDIT + port * 8);
if (init_crd != crd) {
- PMD_DRV_LOG(DEBUG, "BUG! init_crd 0x%x != crd 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "BUG! init_crd 0x%x != crd 0x%x",
init_crd, crd);
return ELINK_STATUS_ERROR;
}
@@ -2283,13 +2284,13 @@ static elink_status_t elink_pbf_update(struct elink_params *params,
init_crd = thresh + 553 - 22;
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Invalid line_speed 0x%x",
line_speed);
return ELINK_STATUS_ERROR;
}
}
REG_WR(sc, PBF_REG_P0_INIT_CRD + port * 4, init_crd);
- PMD_DRV_LOG(DEBUG, "PBF updated to speed %d credit %d",
+ PMD_DRV_LOG(DEBUG, sc, "PBF updated to speed %d credit %d",
line_speed, init_crd);
/* Probe the credit changes */
@@ -2379,7 +2380,7 @@ static elink_status_t elink_cl22_write(struct bnx2x_softc *sc,
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "write phy register failed");
+ PMD_DRV_LOG(DEBUG, sc, "write phy register failed");
rc = ELINK_STATUS_TIMEOUT;
}
REG_WR(sc, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
@@ -2415,7 +2416,7 @@ static elink_status_t elink_cl22_read(struct bnx2x_softc *sc,
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "read phy register failed");
+ PMD_DRV_LOG(DEBUG, sc, "read phy register failed");
*ret_val = 0;
rc = ELINK_STATUS_TIMEOUT;
@@ -2456,7 +2457,7 @@ static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "read phy register failed");
+ PMD_DRV_LOG(DEBUG, sc, "read phy register failed");
elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
*ret_val = 0;
@@ -2480,7 +2481,7 @@ static elink_status_t elink_cl45_read(struct bnx2x_softc *sc,
}
}
if (val & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "read phy register failed");
+ PMD_DRV_LOG(DEBUG, sc, "read phy register failed");
elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
*ret_val = 0;
@@ -2532,7 +2533,7 @@ static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "write phy register failed");
+ PMD_DRV_LOG(DEBUG, sc, "write phy register failed");
elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
rc = ELINK_STATUS_TIMEOUT;
@@ -2554,7 +2555,7 @@ static elink_status_t elink_cl45_write(struct bnx2x_softc *sc,
}
}
if (tmp & EMAC_MDIO_COMM_START_BUSY) {
- PMD_DRV_LOG(DEBUG, "write phy register failed");
+ PMD_DRV_LOG(DEBUG, sc, "write phy register failed");
elink_cb_event_log(sc, ELINK_LOG_ID_MDIO_ACCESS_TIMEOUT); // "MDC/MDIO access timeout"
rc = ELINK_STATUS_TIMEOUT;
@@ -2677,7 +2678,7 @@ static elink_status_t elink_eee_set_timers(struct elink_params *params,
} else if ((params->eee_mode & ELINK_EEE_MODE_ENABLE_LPI) &&
(params->eee_mode & ELINK_EEE_MODE_OVERRIDE_NVRAM) &&
(params->eee_mode & ELINK_EEE_MODE_OUTPUT_TIME)) {
- PMD_DRV_LOG(DEBUG, "Error: Tx LPI is enabled with timer 0");
+ PMD_DRV_LOG(DEBUG, sc, "Error: Tx LPI is enabled with timer 0");
return ELINK_STATUS_ERROR;
}
@@ -2744,11 +2745,11 @@ static elink_status_t elink_eee_advertise(struct elink_phy *phy,
REG_WR(sc, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
if (modes & SHMEM_EEE_10G_ADV) {
- PMD_DRV_LOG(DEBUG, "Advertise 10GBase-T EEE");
+ PMD_DRV_LOG(DEBUG, sc, "Advertise 10GBase-T EEE");
val |= 0x8;
}
if (modes & SHMEM_EEE_1G_ADV) {
- PMD_DRV_LOG(DEBUG, "Advertise 1GBase-T EEE");
+ PMD_DRV_LOG(DEBUG, sc, "Advertise 1GBase-T EEE");
val |= 0x4;
}
@@ -2788,7 +2789,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
if (adv & 0x2) {
if (vars->line_speed == ELINK_SPEED_100)
neg = 1;
- PMD_DRV_LOG(DEBUG, "EEE negotiated - 100M");
+ PMD_DRV_LOG(DEBUG, sc, "EEE negotiated - 100M");
}
}
if (lp & 0x14) {
@@ -2796,7 +2797,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
if (adv & 0x14) {
if (vars->line_speed == ELINK_SPEED_1000)
neg = 1;
- PMD_DRV_LOG(DEBUG, "EEE negotiated - 1G");
+ PMD_DRV_LOG(DEBUG, sc, "EEE negotiated - 1G");
}
}
if (lp & 0x68) {
@@ -2804,7 +2805,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
if (adv & 0x68) {
if (vars->line_speed == ELINK_SPEED_10000)
neg = 1;
- PMD_DRV_LOG(DEBUG, "EEE negotiated - 10G");
+ PMD_DRV_LOG(DEBUG, sc, "EEE negotiated - 10G");
}
}
@@ -2812,7 +2813,7 @@ static void elink_eee_an_resolve(struct elink_phy *phy,
vars->eee_status |= (lp_adv << SHMEM_EEE_LP_ADV_STATUS_SHIFT);
if (neg) {
- PMD_DRV_LOG(DEBUG, "EEE is active");
+ PMD_DRV_LOG(DEBUG, sc, "EEE is active");
vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
}
}
@@ -2842,7 +2843,7 @@ static void elink_bsc_module_sel(struct elink_params *params)
e3_cmn_pin_cfg));
i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
- PMD_DRV_LOG(DEBUG, "Setting BSC switch");
+ PMD_DRV_LOG(DEBUG, sc, "Setting BSC switch");
for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
elink_set_cfg_pin(sc, i2c_pins[idx], i2c_val[idx]);
}
@@ -2858,7 +2859,7 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
elink_status_t rc = ELINK_STATUS_OK;
if (xfer_cnt > 16) {
- PMD_DRV_LOG(DEBUG, "invalid xfer_cnt %d. Max is 16 bytes",
+ PMD_DRV_LOG(DEBUG, sc, "invalid xfer_cnt %d. Max is 16 bytes",
xfer_cnt);
return ELINK_STATUS_ERROR;
}
@@ -2890,7 +2891,7 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
DELAY(10);
val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
if (i++ > 1000) {
- PMD_DRV_LOG(DEBUG, "wr 0 byte timed out after %d try",
+ PMD_DRV_LOG(DEBUG, sc, "wr 0 byte timed out after %d try",
i);
rc = ELINK_STATUS_TIMEOUT;
break;
@@ -2914,7 +2915,8 @@ static elink_status_t elink_bsc_read(struct elink_params *params,
DELAY(10);
val = REG_RD(sc, MCP_REG_MCPR_IMC_COMMAND);
if (i++ > 1000) {
- PMD_DRV_LOG(DEBUG, "rd op timed out after %d try", i);
+ PMD_DRV_LOG(DEBUG, sc,
+ "rd op timed out after %d try", i);
rc = ELINK_STATUS_TIMEOUT;
break;
}
@@ -3059,7 +3061,7 @@ static void elink_serdes_deassert(struct bnx2x_softc *sc, uint8_t port)
{
uint32_t val;
- PMD_DRV_LOG(DEBUG, "elink_serdes_deassert");
+ PMD_DRV_LOG(DEBUG, sc, "elink_serdes_deassert");
val = ELINK_SERDES_RESET_BITS << (port * 16);
@@ -3094,7 +3096,7 @@ static void elink_xgxs_deassert(struct elink_params *params)
struct bnx2x_softc *sc = params->sc;
uint8_t port;
uint32_t val;
- PMD_DRV_LOG(DEBUG, "elink_xgxs_deassert");
+ PMD_DRV_LOG(DEBUG, sc, "elink_xgxs_deassert");
port = params->port;
val = ELINK_XGXS_RESET_BITS << (port * 16);
@@ -3145,7 +3147,7 @@ static void elink_calc_ieee_aneg_adv(struct elink_phy *phy,
*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
break;
}
- PMD_DRV_LOG(DEBUG, "ieee_fc = 0x%x", *ieee_fc);
+ PMD_DRV_LOG(DEBUG, params->sc, "ieee_fc = 0x%x", *ieee_fc);
}
static void set_phy_vars(struct elink_params *params, struct elink_vars *vars)
@@ -3179,7 +3181,7 @@ static void set_phy_vars(struct elink_params *params, struct elink_vars *vars)
ELINK_SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
- PMD_DRV_LOG(DEBUG, "req_flow_ctrl %x, req_line_speed %x,"
+ PMD_DRV_LOG(DEBUG, params->sc, "req_flow_ctrl %x, req_line_speed %x,"
" speed_cap_mask %x",
params->phy[actual_phy_idx].req_flow_ctrl,
params->phy[actual_phy_idx].req_line_speed,
@@ -3210,7 +3212,7 @@ static void elink_ext_phy_set_pause(struct elink_params *params,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
}
- PMD_DRV_LOG(DEBUG, "Ext phy AN advertize 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "Ext phy AN advertize 0x%x", val);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
}
@@ -3289,7 +3291,7 @@ static void elink_ext_phy_update_adv_fc(struct elink_phy *phy,
}
pause_result = (ld_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
pause_result |= (lp_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- PMD_DRV_LOG(DEBUG, "Ext PHY pause result 0x%x", pause_result);
+ PMD_DRV_LOG(DEBUG, sc, "Ext PHY pause result 0x%x", pause_result);
elink_pause_resolve(vars, pause_result);
}
@@ -3358,7 +3360,7 @@ static void elink_warpcore_enable_AN_KR2(struct elink_phy *phy,
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0157},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0620}
};
- PMD_DRV_LOG(DEBUG, "Enabling 20G-KR2");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling 20G-KR2");
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_CL49_USERB0_CTRL, (3 << 6));
@@ -3395,7 +3397,7 @@ static void elink_disable_kr2(struct elink_params *params,
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_BAM_CODE, 0x0002},
{MDIO_WC_DEVAD, MDIO_WC_REG_ETA_CL73_LD_UD_CODE, 0x0000}
};
- PMD_DRV_LOG(DEBUG, "Disabling 20G-KR2");
+ PMD_DRV_LOG(DEBUG, sc, "Disabling 20G-KR2");
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
@@ -3411,7 +3413,7 @@ static void elink_warpcore_set_lpi_passthrough(struct elink_phy *phy,
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Configure WC for LPI pass through");
+ PMD_DRV_LOG(DEBUG, sc, "Configure WC for LPI pass through");
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_EEE_COMBO_CONTROL0, 0x7c);
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
@@ -3449,7 +3451,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
{MDIO_PMA_DEVAD, MDIO_WC_REG_PMD_KR_CONTROL, 0x2},
{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_TX_FIR_TAP, 0},
};
- PMD_DRV_LOG(DEBUG, "Enable Auto Negotiation for KR");
+ PMD_DRV_LOG(DEBUG, sc, "Enable Auto Negotiation for KR");
/* Set to default registers that may be overridden by 10G force */
for (i = 0; i < ARRAY_SIZE(reg_set); i++)
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
@@ -3471,7 +3473,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
/* Enable CL37 1G Parallel Detect */
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD, addr, 0x1);
- PMD_DRV_LOG(DEBUG, "Advertize 1G");
+ PMD_DRV_LOG(DEBUG, sc, "Advertize 1G");
}
if (((vars->line_speed == ELINK_SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
@@ -3485,7 +3487,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
elink_set_aer_mmd(params, phy);
- PMD_DRV_LOG(DEBUG, "Advertize 10G");
+ PMD_DRV_LOG(DEBUG, sc, "Advertize 10G");
}
/* Set Transmit PMD settings */
@@ -3522,7 +3524,7 @@ static void elink_warpcore_enable_AN_KR(struct elink_phy *phy,
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
1);
- PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR");
+ PMD_DRV_LOG(DEBUG, sc, "Enable CL37 BAM on KR");
}
/* Advertise pause */
@@ -3859,7 +3861,7 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
elink_cl45_read_or_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
0x1000);
- PMD_DRV_LOG(DEBUG, "set SGMII AUTONEG");
+ PMD_DRV_LOG(DEBUG, sc, "set SGMII AUTONEG");
} else {
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3874,7 +3876,7 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
val16 |= 0x0040;
break;
default:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Speed not supported: 0x%x",
phy->req_line_speed);
return;
@@ -3886,11 +3888,11 @@ static void elink_warpcore_set_sgmii_speed(struct elink_phy *phy,
elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
- PMD_DRV_LOG(DEBUG, "set SGMII force speed %d",
+ PMD_DRV_LOG(DEBUG, sc, "set SGMII force speed %d",
phy->req_line_speed);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- PMD_DRV_LOG(DEBUG, " (readback) %x", val16);
+ PMD_DRV_LOG(DEBUG, sc, " (readback) %x", val16);
}
/* SGMII Slave mode and disable signal detect */
@@ -4001,7 +4003,7 @@ static elink_status_t elink_get_mod_abs_int_cfg(struct bnx2x_softc *sc,
*/
if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
(cfg_pin > PIN_CFG_GPIO3_P1)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"No cfg pin %x for module detect indication",
cfg_pin);
return ELINK_STATUS_ERROR;
@@ -4093,7 +4095,7 @@ static void elink_warpcore_config_runtime(struct elink_phy *phy,
0x1200);
vars->rx_tx_asic_rst--;
- PMD_DRV_LOG(DEBUG, "0x%x retry left",
+ PMD_DRV_LOG(DEBUG, sc, "0x%x retry left",
vars->rx_tx_asic_rst);
}
break;
@@ -4115,10 +4117,10 @@ static void elink_warpcore_config_sfi(struct elink_phy *phy,
if ((params->req_line_speed[ELINK_LINK_CONFIG_IDX(ELINK_INT_PHY)] ==
ELINK_SPEED_10000) &&
(phy->media_type != ELINK_ETH_PHY_SFP_1G_FIBER)) {
- PMD_DRV_LOG(DEBUG, "Setting 10G SFI");
+ PMD_DRV_LOG(DEBUG, params->sc, "Setting 10G SFI");
elink_warpcore_set_10G_XFI(phy, params, 0);
} else {
- PMD_DRV_LOG(DEBUG, "Setting 1G Fiber");
+ PMD_DRV_LOG(DEBUG, params->sc, "Setting 1G Fiber");
elink_warpcore_set_sgmii_speed(phy, params, 1, 0);
}
}
@@ -4135,7 +4137,7 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params,
dev_info.port_hw_config[port].e3_sfp_ctrl)) &
PORT_HW_CFG_E3_TX_LASER_MASK;
/* Set the !tx_en since this pin is DISABLE_TX_LASER */
- PMD_DRV_LOG(DEBUG, "Setting WC TX to %d", tx_en);
+ PMD_DRV_LOG(DEBUG, sc, "Setting WC TX to %d", tx_en);
/* For 20G, the expected pin to be used is 3 pins after the current */
elink_set_cfg_pin(sc, cfg_pin, tx_en ^ 1);
@@ -4156,7 +4158,7 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
dev_info.port_hw_config[params->port].
default_cfg)) &
PORT_HW_CFG_NET_SERDES_IF_MASK);
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Begin Warpcore init, link_speed %d, "
"serdes_net_if = 0x%x", vars->line_speed, serdes_net_if);
elink_set_aer_mmd(params, phy);
@@ -4167,7 +4169,7 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
((phy->req_line_speed == ELINK_SPEED_100) ||
(phy->req_line_speed == ELINK_SPEED_10)))) {
vars->phy_flags |= PHY_SGMII_FLAG;
- PMD_DRV_LOG(DEBUG, "Setting SGMII mode");
+ PMD_DRV_LOG(DEBUG, sc, "Setting SGMII mode");
elink_warpcore_clear_regs(phy, params, lane);
elink_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
@@ -4177,7 +4179,7 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
if (params->loopback_mode != ELINK_LOOPBACK_EXT)
elink_warpcore_enable_AN_KR(phy, params, vars);
else {
- PMD_DRV_LOG(DEBUG, "Setting KR 10G-Force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting KR 10G-Force");
elink_warpcore_set_10G_KR(phy, params);
}
break;
@@ -4185,14 +4187,14 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
case PORT_HW_CFG_NET_SERDES_IF_XFI:
elink_warpcore_clear_regs(phy, params, lane);
if (vars->line_speed == ELINK_SPEED_10000) {
- PMD_DRV_LOG(DEBUG, "Setting 10G XFI");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 10G XFI");
elink_warpcore_set_10G_XFI(phy, params, 1);
} else {
if (ELINK_SINGLE_MEDIA_DIRECT(params)) {
- PMD_DRV_LOG(DEBUG, "1G Fiber");
+ PMD_DRV_LOG(DEBUG, sc, "1G Fiber");
fiber_mode = 1;
} else {
- PMD_DRV_LOG(DEBUG, "10/100/1G SGMII");
+ PMD_DRV_LOG(DEBUG, sc, "10/100/1G SGMII");
fiber_mode = 0;
}
elink_warpcore_set_sgmii_speed(phy,
@@ -4221,10 +4223,10 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
if (vars->line_speed != ELINK_SPEED_20000) {
- PMD_DRV_LOG(DEBUG, "Speed not supported yet");
+ PMD_DRV_LOG(DEBUG, sc, "Speed not supported yet");
return 0;
}
- PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 20G DXGXS");
elink_warpcore_set_20G_DXGXS(sc, phy, lane);
/* Issue Module detection */
@@ -4234,12 +4236,12 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
if (!params->loopback_mode) {
elink_warpcore_enable_AN_KR(phy, params, vars);
} else {
- PMD_DRV_LOG(DEBUG, "Setting KR 20G-Force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting KR 20G-Force");
elink_warpcore_set_20G_force_KR2(phy, params);
}
break;
default:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Unsupported Serdes Net Interface 0x%x",
serdes_net_if);
return 0;
@@ -4248,7 +4250,7 @@ static uint8_t elink_warpcore_config_init(struct elink_phy *phy,
/* Take lane out of reset after configuration is finished */
elink_warpcore_reset_lane(sc, phy, 0);
- PMD_DRV_LOG(DEBUG, "Exit config init");
+ PMD_DRV_LOG(DEBUG, sc, "Exit config init");
return 0;
}
@@ -4313,7 +4315,7 @@ static void elink_set_warpcore_loopback(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t val16;
uint32_t lane;
- PMD_DRV_LOG(DEBUG, "Setting Warpcore loopback type %x, speed %d",
+ PMD_DRV_LOG(DEBUG, sc, "Setting Warpcore loopback type %x, speed %d",
params->loopback_mode, phy->req_line_speed);
if (phy->req_line_speed < ELINK_SPEED_10000 ||
@@ -4358,7 +4360,7 @@ static void elink_sync_link(struct elink_params *params,
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
- PMD_DRV_LOG(DEBUG, "phy link up");
+ PMD_DRV_LOG(DEBUG, sc, "phy link up");
vars->phy_link_up = 1;
vars->duplex = DUPLEX_FULL;
@@ -4436,7 +4438,7 @@ static void elink_sync_link(struct elink_params *params,
vars->mac_type = ELINK_MAC_TYPE_EMAC;
}
} else { /* Link down */
- PMD_DRV_LOG(DEBUG, "phy link down");
+ PMD_DRV_LOG(DEBUG, sc, "phy link down");
vars->phy_link_up = 0;
@@ -4493,7 +4495,7 @@ void elink_link_status_update(struct elink_params *params,
params->phy[ELINK_EXT_PHY2].media_type =
(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
- PMD_DRV_LOG(DEBUG, "media_types = 0x%x", media_types);
+ PMD_DRV_LOG(DEBUG, sc, "media_types = 0x%x", media_types);
/* Sync AEU offset */
sync_offset = params->shmem_base +
@@ -4514,9 +4516,9 @@ void elink_link_status_update(struct elink_params *params,
vars->link_attr_sync = SHMEM2_RD(sc,
link_attr_sync[params->port]);
- PMD_DRV_LOG(DEBUG, "link_status 0x%x phy_link_up %x int_mask 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "link_status 0x%x phy_link_up %x int_mask 0x%x",
vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
- PMD_DRV_LOG(DEBUG, "line_speed %x duplex %x flow_ctrl 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "line_speed %x duplex %x flow_ctrl 0x%x",
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
@@ -4577,7 +4579,7 @@ static elink_status_t elink_reset_unicore(struct elink_params *params,
elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized,"
// " Port %d",
- PMD_DRV_LOG(DEBUG, "BUG! XGXS is still in reset!");
+ PMD_DRV_LOG(DEBUG, sc, "BUG! XGXS is still in reset!");
return ELINK_STATUS_ERROR;
}
@@ -4636,7 +4638,7 @@ static void elink_set_parallel_detection(struct elink_phy *phy,
control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
else
control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
- PMD_DRV_LOG(DEBUG, "phy->speed_cap_mask = 0x%x, control2 = 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "phy->speed_cap_mask = 0x%x, control2 = 0x%x",
phy->speed_cap_mask, control2);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
@@ -4644,7 +4646,7 @@ static void elink_set_parallel_detection(struct elink_phy *phy,
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- PMD_DRV_LOG(DEBUG, "XGXS");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS");
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_10G_PARALLEL_DETECT,
@@ -4797,7 +4799,7 @@ static void elink_program_serdes(struct elink_phy *phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
/* Clearing the speed value before setting the right speed */
- PMD_DRV_LOG(DEBUG, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val);
+ PMD_DRV_LOG(DEBUG, sc, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
@@ -4865,7 +4867,7 @@ static void elink_restart_autoneg(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t mii_control;
- PMD_DRV_LOG(DEBUG, "elink_restart_autoneg");
+ PMD_DRV_LOG(DEBUG, sc, "elink_restart_autoneg");
/* Enable and restart BAM/CL37 aneg */
if (enable_cl73) {
@@ -4885,7 +4887,7 @@ static void elink_restart_autoneg(struct elink_phy *phy,
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"elink_restart_autoneg mii_control before = 0x%x",
mii_control);
CL22_WR_OVER_CL45(sc, phy,
@@ -4944,7 +4946,7 @@ static void elink_initialize_sgmii_process(struct elink_phy *phy,
break;
default:
/* Invalid speed for SGMII */
- PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Invalid line_speed 0x%x",
vars->line_speed);
break;
}
@@ -4979,7 +4981,7 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_STATUS2, &status2_1000x);
if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
- PMD_DRV_LOG(DEBUG, "1G parallel detect link on port %d",
+ PMD_DRV_LOG(DEBUG, sc, "1G parallel detect link on port %d",
params->port);
return ELINK_STATUS_ERROR;
}
@@ -4989,7 +4991,7 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy,
MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS, &pd_10g);
if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
- PMD_DRV_LOG(DEBUG, "10G parallel detect link on port %d",
+ PMD_DRV_LOG(DEBUG, sc, "10G parallel detect link on port %d",
params->port);
return ELINK_STATUS_ERROR;
}
@@ -5020,7 +5022,7 @@ static void elink_update_adv_fc(struct elink_phy *phy,
MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
pause_result |= (lp_pause &
MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
- PMD_DRV_LOG(DEBUG, "pause_result CL73 0x%x", pause_result);
+ PMD_DRV_LOG(DEBUG, sc, "pause_result CL73 0x%x", pause_result);
} else {
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_COMBO_IEEE0,
@@ -5033,7 +5035,7 @@ static void elink_update_adv_fc(struct elink_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 5;
pause_result |= (lp_pause &
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) >> 7;
- PMD_DRV_LOG(DEBUG, "pause_result CL37 0x%x", pause_result);
+ PMD_DRV_LOG(DEBUG, sc, "pause_result CL37 0x%x", pause_result);
}
elink_pause_resolve(vars, pause_result);
@@ -5062,7 +5064,7 @@ static void elink_flow_ctrl_resolve(struct elink_phy *phy,
}
elink_update_adv_fc(phy, params, vars, gp_status);
}
- PMD_DRV_LOG(DEBUG, "flow_ctrl 0x%x", vars->flow_ctrl);
+ PMD_DRV_LOG(DEBUG, params->sc, "flow_ctrl 0x%x", vars->flow_ctrl);
}
static void elink_check_fallback_to_cl37(struct elink_phy *phy,
@@ -5070,13 +5072,13 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
{
struct bnx2x_softc *sc = params->sc;
uint16_t rx_status, ustat_val, cl37_fsm_received;
- PMD_DRV_LOG(DEBUG, "elink_check_fallback_to_cl37");
+ PMD_DRV_LOG(DEBUG, sc, "elink_check_fallback_to_cl37");
/* Step 1: Make sure signal is detected */
CL22_RD_OVER_CL45(sc, phy,
MDIO_REG_BANK_RX0, MDIO_RX0_RX_STATUS, &rx_status);
if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
(MDIO_RX0_RX_STATUS_SIGDET)) {
- PMD_DRV_LOG(DEBUG, "Signal is not detected. Restoring CL73."
+ PMD_DRV_LOG(DEBUG, sc, "Signal is not detected. Restoring CL73."
"rx_status(0x80b0) = 0x%x", rx_status);
CL22_WR_OVER_CL45(sc, phy,
MDIO_REG_BANK_CL73_IEEEB0,
@@ -5093,7 +5095,7 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
(MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
- PMD_DRV_LOG(DEBUG, "CL73 state-machine is not stable. "
+ PMD_DRV_LOG(DEBUG, sc, "CL73 state-machine is not stable. "
"ustat_val(0x8371) = 0x%x", ustat_val);
return;
}
@@ -5108,7 +5110,7 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
(MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
- PMD_DRV_LOG(DEBUG, "No CL37 FSM were received. "
+ PMD_DRV_LOG(DEBUG, sc, "No CL37 FSM were received. "
"misc_rx_status(0x8330) = 0x%x", cl37_fsm_received);
return;
}
@@ -5124,7 +5126,7 @@ static void elink_check_fallback_to_cl37(struct elink_phy *phy,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, 0);
/* Restart CL37 autoneg */
elink_restart_autoneg(phy, params, 0);
- PMD_DRV_LOG(DEBUG, "Disabling CL73, and restarting CL37 autoneg");
+ PMD_DRV_LOG(DEBUG, sc, "Disabling CL73, and restarting CL37 autoneg");
}
static void elink_xgxs_an_resolve(struct elink_phy *phy,
@@ -5148,7 +5150,7 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
if (phy->req_line_speed == ELINK_SPEED_AUTO_NEG)
vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
if (is_link_up) {
- PMD_DRV_LOG(DEBUG, "phy link up");
+ PMD_DRV_LOG(DEBUG, params->sc, "phy link up");
vars->phy_link_up = 1;
vars->link_status |= LINK_STATUS_LINK_UP;
@@ -5189,7 +5191,7 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
case ELINK_GP_STATUS_5G:
case ELINK_GP_STATUS_6G:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, params->sc,
"link speed unsupported gp_status 0x%x",
speed_mask);
return ELINK_STATUS_ERROR;
@@ -5209,13 +5211,13 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
vars->link_status |= ELINK_LINK_20GTFD;
break;
default:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, params->sc,
"link speed unsupported gp_status 0x%x",
speed_mask);
return ELINK_STATUS_ERROR;
}
} else { /* link_down */
- PMD_DRV_LOG(DEBUG, "phy link down");
+ PMD_DRV_LOG(DEBUG, params->sc, "phy link down");
vars->phy_link_up = 0;
@@ -5223,7 +5225,7 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy,
vars->flow_ctrl = ELINK_FLOW_CTRL_NONE;
vars->mac_type = ELINK_MAC_TYPE_NONE;
}
- PMD_DRV_LOG(DEBUG, " phy_link_up %x line_speed %d",
+ PMD_DRV_LOG(DEBUG, params->sc, " phy_link_up %x line_speed %d",
vars->phy_link_up, vars->line_speed);
return ELINK_STATUS_OK;
}
@@ -5246,7 +5248,7 @@ static uint8_t elink_link_settings_status(struct elink_phy *phy,
if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
link_up = 1;
speed_mask = gp_status & ELINK_GP_STATUS_SPEED_MASK;
- PMD_DRV_LOG(DEBUG, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x",
gp_status, link_up, speed_mask);
rc = elink_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
duplex);
@@ -5296,7 +5298,7 @@ static uint8_t elink_link_settings_status(struct elink_phy *phy,
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
- PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "duplex %x flow_ctrl 0x%x link_status 0x%x",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
@@ -5322,7 +5324,7 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
uint16_t temp_link_up;
elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &temp_link_up);
elink_cl45_read(sc, phy, MDIO_WC_DEVAD, 1, &link_up);
- PMD_DRV_LOG(DEBUG, "PCS RX link status = 0x%x-->0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "PCS RX link status = 0x%x-->0x%x",
temp_link_up, link_up);
link_up &= (1 << 2);
if (link_up)
@@ -5330,7 +5332,7 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
} else {
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
- PMD_DRV_LOG(DEBUG, "0x81d1 = 0x%x", gp_status1);
+ PMD_DRV_LOG(DEBUG, sc, "0x81d1 = 0x%x", gp_status1);
/* Check for either KR, 1G, or AN up. */
link_up = ((gp_status1 >> 8) |
(gp_status1 >> 12) | (gp_status1)) & (1 << lane);
@@ -5400,7 +5402,7 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
}
- PMD_DRV_LOG(DEBUG, "lane %d gp_speed 0x%x", lane, gp_speed);
+ PMD_DRV_LOG(DEBUG, sc, "lane %d gp_speed 0x%x", lane, gp_speed);
if ((lane & 1) == 0)
gp_speed <<= 8;
@@ -5416,7 +5418,7 @@ static uint8_t elink_warpcore_read_status(struct elink_phy *phy,
(!(phy->flags & ELINK_FLAGS_WC_DUAL_MODE)))
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
- PMD_DRV_LOG(DEBUG, "duplex %x flow_ctrl 0x%x link_status 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "duplex %x flow_ctrl 0x%x link_status 0x%x",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
}
@@ -5463,7 +5465,7 @@ static elink_status_t elink_emac_program(struct elink_params *params,
uint8_t port = params->port;
uint16_t mode = 0;
- PMD_DRV_LOG(DEBUG, "setting link speed & duplex");
+ PMD_DRV_LOG(DEBUG, sc, "setting link speed & duplex");
elink_bits_dis(sc, GRCBASE_EMAC0 + port * 0x400 +
EMAC_REG_EMAC_MODE,
(EMAC_MODE_25G_MODE |
@@ -5487,7 +5489,8 @@ static elink_status_t elink_emac_program(struct elink_params *params,
default:
/* 10G not valid for EMAC */
- PMD_DRV_LOG(DEBUG, "Invalid line_speed 0x%x", vars->line_speed);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Invalid line_speed 0x%x", vars->line_speed);
return ELINK_STATUS_ERROR;
}
@@ -5539,7 +5542,7 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
if (vars->line_speed != ELINK_SPEED_AUTO_NEG ||
(ELINK_SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == ELINK_LOOPBACK_EXT)) {
- PMD_DRV_LOG(DEBUG, "not SGMII, no AN");
+ PMD_DRV_LOG(DEBUG, params->sc, "not SGMII, no AN");
/* Disable autoneg */
elink_set_autoneg(phy, params, vars, 0);
@@ -5548,7 +5551,7 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
elink_program_serdes(phy, params, vars);
} else { /* AN_mode */
- PMD_DRV_LOG(DEBUG, "not SGMII, AN");
+ PMD_DRV_LOG(DEBUG, params->sc, "not SGMII, AN");
/* AN enabled */
elink_set_brcm_cl37_advertisement(phy, params);
@@ -5565,7 +5568,7 @@ static uint8_t elink_xgxs_config_init(struct elink_phy *phy,
}
} else { /* SGMII mode */
- PMD_DRV_LOG(DEBUG, "SGMII");
+ PMD_DRV_LOG(DEBUG, params->sc, "SGMII");
elink_initialize_sgmii_process(phy, params, vars);
}
@@ -5634,7 +5637,7 @@ static uint16_t elink_wait_reset_complete(struct bnx2x_softc *sc,
elink_cb_event_log(sc, ELINK_LOG_ID_PHY_UNINITIALIZED, params->port); // "Warning: PHY was not initialized,"
// " Port %d",
- PMD_DRV_LOG(DEBUG, "control reg 0x%x (after %d ms)", ctrl, cnt);
+ PMD_DRV_LOG(DEBUG, sc, "control reg 0x%x (after %d ms)", ctrl, cnt);
return cnt;
}
@@ -5652,35 +5655,35 @@ static void elink_link_int_enable(struct elink_params *params)
} else if (params->switch_cfg == ELINK_SWITCH_CFG_10G) {
mask = (ELINK_NIG_MASK_XGXS0_LINK10G |
ELINK_NIG_MASK_XGXS0_LINK_STATUS);
- PMD_DRV_LOG(DEBUG, "enabled XGXS interrupt");
+ PMD_DRV_LOG(DEBUG, sc, "enabled XGXS interrupt");
if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) &&
params->phy[ELINK_INT_PHY].type !=
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
mask |= ELINK_NIG_MASK_MI_INT;
- PMD_DRV_LOG(DEBUG, "enabled external phy int");
+ PMD_DRV_LOG(DEBUG, sc, "enabled external phy int");
}
} else { /* SerDes */
mask = ELINK_NIG_MASK_SERDES0_LINK_STATUS;
- PMD_DRV_LOG(DEBUG, "enabled SerDes interrupt");
+ PMD_DRV_LOG(DEBUG, sc, "enabled SerDes interrupt");
if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) &&
params->phy[ELINK_INT_PHY].type !=
PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
mask |= ELINK_NIG_MASK_MI_INT;
- PMD_DRV_LOG(DEBUG, "enabled external phy int");
+ PMD_DRV_LOG(DEBUG, sc, "enabled external phy int");
}
}
elink_bits_en(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4, mask);
- PMD_DRV_LOG(DEBUG, "port %x, is_xgxs %x, int_status 0x%x", port,
+ PMD_DRV_LOG(DEBUG, sc, "port %x, is_xgxs %x, int_status 0x%x", port,
(params->switch_cfg == ELINK_SWITCH_CFG_10G),
REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
- PMD_DRV_LOG(DEBUG, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x",
+ PMD_DRV_LOG(DEBUG, sc, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x",
REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port * 0x18),
REG_RD(sc,
NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
- PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x",
+ PMD_DRV_LOG(DEBUG, sc, " 10G %x, XGXS_LINK %x",
REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
}
@@ -5696,7 +5699,7 @@ static void elink_rearm_latch_signal(struct bnx2x_softc *sc, uint8_t port,
*/
/* Read Latched signals */
latch_status = REG_RD(sc, NIG_REG_LATCH_STATUS_0 + port * 8);
- PMD_DRV_LOG(DEBUG, "latch_status = 0x%x", latch_status);
+ PMD_DRV_LOG(DEBUG, sc, "latch_status = 0x%x", latch_status);
/* Handle only those with latched-signal=up. */
if (exp_mi_int)
elink_bits_en(sc,
@@ -5748,7 +5751,7 @@ static void elink_link_int_ack(struct elink_params *params,
} else
mask = ELINK_NIG_STATUS_SERDES0_LINK_STATUS;
}
- PMD_DRV_LOG(DEBUG, "Ack link up interrupt with mask 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Ack link up interrupt with mask 0x%x",
mask);
elink_bits_en(sc,
NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4, mask);
@@ -5811,7 +5814,7 @@ static void elink_set_xgxs_loopback(struct elink_phy *phy,
if (phy->req_line_speed != ELINK_SPEED_1000) {
uint32_t md_devad = 0;
- PMD_DRV_LOG(DEBUG, "XGXS 10G loopback enable");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS 10G loopback enable");
if (!CHIP_IS_E3(sc)) {
/* Change the uni_phy_addr in the nig */
@@ -5843,7 +5846,7 @@ static void elink_set_xgxs_loopback(struct elink_phy *phy,
}
} else {
uint16_t mii_ctrl;
- PMD_DRV_LOG(DEBUG, "XGXS 1G loopback enable");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS 1G loopback enable");
elink_cl45_read(sc, phy, 5,
(MDIO_REG_BANK_COMBO_IEEE0 +
(MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
@@ -5867,8 +5870,9 @@ elink_status_t elink_set_led(struct elink_params *params,
uint32_t tmp;
uint32_t emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "elink_set_led: port %x, mode %d", port, mode);
- PMD_DRV_LOG(DEBUG, "speed 0x%x, hw_led_mode 0x%x", speed, hw_led_mode);
+ PMD_DRV_LOG(DEBUG, sc, "elink_set_led: port %x, mode %d", port, mode);
+ PMD_DRV_LOG(DEBUG, sc,
+ "speed 0x%x, hw_led_mode 0x%x", speed, hw_led_mode);
/* In case */
for (phy_idx = ELINK_EXT_PHY1; phy_idx < ELINK_MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].set_link_led) {
@@ -5988,7 +5992,8 @@ elink_status_t elink_set_led(struct elink_params *params,
default:
rc = ELINK_STATUS_ERROR;
- PMD_DRV_LOG(DEBUG, "elink_set_led: Invalid led mode %d", mode);
+ PMD_DRV_LOG(DEBUG, sc,
+ "elink_set_led: Invalid led mode %d", mode);
break;
}
return rc;
@@ -6055,7 +6060,7 @@ static elink_status_t elink_link_initialize(struct elink_params *params,
if (phy_index == ELINK_EXT_PHY2 &&
(elink_phy_selection(params) ==
PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Not initializing second phy");
continue;
}
@@ -6096,7 +6101,7 @@ static void elink_common_ext_link_reset(__rte_unused struct elink_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
- PMD_DRV_LOG(DEBUG, "reset external PHY");
+ PMD_DRV_LOG(DEBUG, sc, "reset external PHY");
}
static elink_status_t elink_update_link_down(struct elink_params *params,
@@ -6105,7 +6110,7 @@ static elink_status_t elink_update_link_down(struct elink_params *params,
struct bnx2x_softc *sc = params->sc;
uint8_t port = params->port;
- PMD_DRV_LOG(DEBUG, "Port %x: Link is down", port);
+ PMD_DRV_LOG(DEBUG, sc, "Port %x: Link is down", port);
elink_set_led(params, vars, ELINK_LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
/* Indicate no mac active */
@@ -6166,7 +6171,7 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
if (link_10g) {
if (elink_xmac_enable(params, vars, 0) ==
ELINK_STATUS_NO_LINK) {
- PMD_DRV_LOG(DEBUG, "Found errors on XMAC");
+ PMD_DRV_LOG(DEBUG, sc, "Found errors on XMAC");
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~LINK_STATUS_LINK_UP;
@@ -6178,7 +6183,7 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
(vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
- PMD_DRV_LOG(DEBUG, "Enabling LPI assertion");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling LPI assertion");
REG_WR(sc, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
(params->port << 2), 1);
REG_WR(sc, MISC_REG_CPMU_LP_DR_ENABLE, 1);
@@ -6190,7 +6195,7 @@ static elink_status_t elink_update_link_up(struct elink_params *params,
if (link_10g) {
if (elink_bmac_enable(params, vars, 0, 1) ==
ELINK_STATUS_NO_LINK) {
- PMD_DRV_LOG(DEBUG, "Found errors on BMAC");
+ PMD_DRV_LOG(DEBUG, sc, "Found errors on BMAC");
vars->link_up = 0;
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
vars->link_status &= ~LINK_STATUS_LINK_UP;
@@ -6275,19 +6280,19 @@ elink_status_t elink_link_update(struct elink_params * params,
if (USES_WARPCORE(sc))
elink_set_aer_mmd(params, &params->phy[ELINK_INT_PHY]);
- PMD_DRV_LOG(DEBUG, "port %x, XGXS?%x, int_status 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "port %x, XGXS?%x, int_status 0x%x",
port, (vars->phy_flags & PHY_XGXS_FLAG),
REG_RD(sc, NIG_REG_STATUS_INTERRUPT_PORT0 + port * 4));
is_mi_int = (uint8_t) (REG_RD(sc, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
port * 0x18) > 0);
- PMD_DRV_LOG(DEBUG, "int_mask 0x%x MI_INT %x, SERDES_LINK %x",
+ PMD_DRV_LOG(DEBUG, sc, "int_mask 0x%x MI_INT %x, SERDES_LINK %x",
REG_RD(sc, NIG_REG_MASK_INTERRUPT_PORT0 + port * 4),
is_mi_int,
REG_RD(sc,
NIG_REG_SERDES0_STATUS_LINK_STATUS + port * 0x3c));
- PMD_DRV_LOG(DEBUG, " 10G %x, XGXS_LINK %x",
+ PMD_DRV_LOG(DEBUG, sc, " 10G %x, XGXS_LINK %x",
REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK10G + port * 0x68),
REG_RD(sc, NIG_REG_XGXS0_STATUS_LINK_STATUS + port * 0x68));
@@ -6311,10 +6316,10 @@ elink_status_t elink_link_update(struct elink_params * params,
cur_link_up = phy->read_status(phy, params,
&phy_vars[phy_index]);
if (cur_link_up) {
- PMD_DRV_LOG(DEBUG, "phy in index %d link is up",
+ PMD_DRV_LOG(DEBUG, sc, "phy in index %d link is up",
phy_index);
} else {
- PMD_DRV_LOG(DEBUG, "phy in index %d link is down",
+ PMD_DRV_LOG(DEBUG, sc, "phy in index %d link is down",
phy_index);
continue;
}
@@ -6347,7 +6352,7 @@ elink_status_t elink_link_update(struct elink_params * params,
* to link up by itself (using configuration)
* - DEFAULT should be overridden during initialization
*/
- PMD_DRV_LOG(DEBUG, "Invalid link indication"
+ PMD_DRV_LOG(DEBUG, sc, "Invalid link indication"
"mpc=0x%x. DISABLING LINK !!!",
params->multi_phy_config);
ext_phy_link_up = 0;
@@ -6385,7 +6390,7 @@ elink_status_t elink_link_update(struct elink_params * params,
*/
if (active_external_phy == ELINK_EXT_PHY1) {
if (params->phy[ELINK_EXT_PHY2].phy_specific_func) {
- PMD_DRV_LOG(DEBUG, "Disabling TX on EXT_PHY2");
+ PMD_DRV_LOG(DEBUG, sc, "Disabling TX on EXT_PHY2");
params->phy[ELINK_EXT_PHY2].
phy_specific_func(&params->
phy[ELINK_EXT_PHY2],
@@ -6403,7 +6408,7 @@ elink_status_t elink_link_update(struct elink_params * params,
vars->eee_status = phy_vars[active_external_phy].eee_status;
- PMD_DRV_LOG(DEBUG, "Active external phy selected: %x",
+ PMD_DRV_LOG(DEBUG, sc, "Active external phy selected: %x",
active_external_phy);
}
@@ -6417,7 +6422,7 @@ elink_status_t elink_link_update(struct elink_params * params,
break;
}
}
- PMD_DRV_LOG(DEBUG, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+ PMD_DRV_LOG(DEBUG, sc, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
" ext_phy_line_speed = %d", vars->flow_ctrl,
vars->link_status, ext_phy_line_speed);
/* Upon link speed change set the NIG into drain mode. Comes to
@@ -6428,7 +6433,7 @@ elink_status_t elink_link_update(struct elink_params * params,
if (vars->phy_link_up) {
if (!(ELINK_SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
(ext_phy_line_speed != vars->line_speed)) {
- PMD_DRV_LOG(DEBUG, "Internal link speed %d is"
+ PMD_DRV_LOG(DEBUG, sc, "Internal link speed %d is"
" different than the external"
" link speed %d", vars->line_speed,
ext_phy_line_speed);
@@ -6454,7 +6459,7 @@ elink_status_t elink_link_update(struct elink_params * params,
* initialize it
*/
if (!(ELINK_SINGLE_MEDIA_DIRECT(params))) {
- PMD_DRV_LOG(DEBUG, "ext_phy_link_up = %d, int_link_up = %d,"
+ PMD_DRV_LOG(DEBUG, sc, "ext_phy_link_up = %d, int_link_up = %d,"
" init_preceding = %d", ext_phy_link_up,
vars->phy_link_up,
params->phy[ELINK_EXT_PHY1].flags &
@@ -6519,7 +6524,7 @@ static void elink_save_spirom_version(struct bnx2x_softc *sc,
__rte_unused uint8_t port,
uint32_t spirom_ver, uint32_t ver_addr)
{
- PMD_DRV_LOG(DEBUG, "FW version 0x%x:0x%x for port %d",
+ PMD_DRV_LOG(DEBUG, sc, "FW version 0x%x:0x%x for port %d",
(uint16_t) (spirom_ver >> 16), (uint16_t) spirom_ver, port);
if (ver_addr)
@@ -6585,7 +6590,7 @@ static void elink_8073_resolve_fc(struct elink_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
elink_pause_resolve(vars, pause_result);
- PMD_DRV_LOG(DEBUG, "Ext PHY CL37 pause result 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Ext PHY CL37 pause result 0x%x",
pause_result);
}
}
@@ -6629,7 +6634,7 @@ static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc,
do {
count++;
if (count > 300) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"elink_8073_8727_external_rom_boot port %x:"
"Download failed. fw version = 0x%x",
port, fw_ver1);
@@ -6654,7 +6659,7 @@ static elink_status_t elink_8073_8727_external_rom_boot(struct bnx2x_softc *sc,
MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL1, 0x0000);
elink_save_bnx2x_spirom_ver(sc, phy, port);
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"elink_8073_8727_external_rom_boot port %x:"
"Download complete. fw version = 0x%x", port, fw_ver1);
@@ -6715,10 +6720,10 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
* these bits indicate 2.5G or 1G link up).
*/
if (!(val & (1 << 14)) || !(val & (1 << 13))) {
- PMD_DRV_LOG(DEBUG, "XAUI work-around not required");
+ PMD_DRV_LOG(DEBUG, sc, "XAUI work-around not required");
return ELINK_STATUS_OK;
} else if (!(val & (1 << 15))) {
- PMD_DRV_LOG(DEBUG, "bit 15 went off");
+ PMD_DRV_LOG(DEBUG, sc, "bit 15 went off");
/* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
* MSB (bit15) goes to 1 (indicating that the XAUI
* workaround has completed), then continue on with
@@ -6730,7 +6735,7 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
MDIO_PMA_REG_8073_XAUI_WA,
&val);
if (val & (1 << 15)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"XAUI workaround has completed");
return ELINK_STATUS_OK;
}
@@ -6740,7 +6745,7 @@ static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc,
}
DELAY(1000 * 3);
}
- PMD_DRV_LOG(DEBUG, "Warning: XAUI work-around timeout !!!");
+ PMD_DRV_LOG(DEBUG, sc, "Warning: XAUI work-around timeout !!!");
return ELINK_STATUS_ERROR;
}
@@ -6782,7 +6787,7 @@ static void elink_8073_set_pause_cl37(struct elink_params *params,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
}
- PMD_DRV_LOG(DEBUG, "Ext phy AN advertize cl37 0x%x", cl37_val);
+ PMD_DRV_LOG(DEBUG, sc, "Ext phy AN advertize cl37 0x%x", cl37_val);
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
@@ -6813,7 +6818,7 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
struct bnx2x_softc *sc = params->sc;
uint16_t val = 0, tmp1;
uint8_t gpio_port;
- PMD_DRV_LOG(DEBUG, "Init 8073");
+ PMD_DRV_LOG(DEBUG, sc, "Init 8073");
if (CHIP_IS_E2(sc))
gpio_port = SC_PATH(sc);
@@ -6834,12 +6839,12 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- PMD_DRV_LOG(DEBUG, "Before rom RX_ALARM(port1): 0x%x", tmp1);
+ PMD_DRV_LOG(DEBUG, sc, "Before rom RX_ALARM(port1): 0x%x", tmp1);
/* Swap polarity if required - Must be done only in non-1G mode */
if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
/* Configure the 8073 to swap _P and _N of the KR lines */
- PMD_DRV_LOG(DEBUG, "Swapping polarity for the 8073");
+ PMD_DRV_LOG(DEBUG, sc, "Swapping polarity for the 8073");
/* 10G Rx/Tx and 1G Tx signal polarity swap */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
@@ -6861,11 +6866,11 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, &val);
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8073_BAM, val | 1);
- PMD_DRV_LOG(DEBUG, "Enable CL37 BAM on KR");
+ PMD_DRV_LOG(DEBUG, sc, "Enable CL37 BAM on KR");
}
if (params->loopback_mode == ELINK_LOOPBACK_EXT) {
elink_807x_force_10G(sc, phy);
- PMD_DRV_LOG(DEBUG, "Forced speed 10G on 807X");
+ PMD_DRV_LOG(DEBUG, sc, "Forced speed 10G on 807X");
return ELINK_STATUS_OK;
} else {
elink_cl45_write(sc, phy,
@@ -6891,7 +6896,7 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
val |= (1 << 5);
- PMD_DRV_LOG(DEBUG, "807x autoneg val = 0x%x", val);
+ PMD_DRV_LOG(DEBUG, sc, "807x autoneg val = 0x%x", val);
}
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
@@ -6905,13 +6910,13 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
&phy_ver);
- PMD_DRV_LOG(DEBUG, "Add 2.5G");
+ PMD_DRV_LOG(DEBUG, sc, "Add 2.5G");
if (phy_ver > 0)
tmp1 |= 1;
else
tmp1 &= 0xfffe;
} else {
- PMD_DRV_LOG(DEBUG, "Disable 2.5G");
+ PMD_DRV_LOG(DEBUG, sc, "Disable 2.5G");
tmp1 &= 0xfffe;
}
@@ -6945,7 +6950,7 @@ static uint8_t elink_8073_config_init(struct elink_phy *phy,
/* Restart autoneg */
DELAY(1000 * 500);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
- PMD_DRV_LOG(DEBUG, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x",
+ PMD_DRV_LOG(DEBUG, sc, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x",
((val & (1 << 5)) > 0), ((val & (1 << 7)) > 0));
return ELINK_STATUS_OK;
}
@@ -6962,12 +6967,12 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- PMD_DRV_LOG(DEBUG, "8703 LASI status 0x%x", val1);
+ PMD_DRV_LOG(DEBUG, sc, "8703 LASI status 0x%x", val1);
/* Clear the interrupt LASI status register */
elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "807x PCS status 0x%x->0x%x", val2, val1);
+ PMD_DRV_LOG(DEBUG, sc, "807x PCS status 0x%x->0x%x", val2, val1);
/* Clear MSG-OUT */
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
@@ -6975,16 +6980,16 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
/* Check the LASI */
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
- PMD_DRV_LOG(DEBUG, "KR 0x9003 0x%x", val2);
+ PMD_DRV_LOG(DEBUG, sc, "KR 0x9003 0x%x", val2);
/* Check the link status */
elink_cl45_read(sc, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
- PMD_DRV_LOG(DEBUG, "KR PCS status 0x%x", val2);
+ PMD_DRV_LOG(DEBUG, sc, "KR PCS status 0x%x", val2);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
link_up = ((val1 & 4) == 4);
- PMD_DRV_LOG(DEBUG, "PMA_REG_STATUS=0x%x", val1);
+ PMD_DRV_LOG(DEBUG, sc, "PMA_REG_STATUS=0x%x", val1);
if (link_up && ((phy->req_line_speed != ELINK_SPEED_10000))) {
if (elink_8073_xaui_wa(sc, phy) != 0)
@@ -6998,7 +7003,7 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
/* Check the link status on 1.1.2 */
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "KR PMA status 0x%x->0x%x,"
+ PMD_DRV_LOG(DEBUG, sc, "KR PMA status 0x%x->0x%x,"
"an_link_status=0x%x", val2, val1, an1000_status);
link_up = (((val1 & 4) == 4) || (an1000_status & (1 << 1)));
@@ -7024,21 +7029,21 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_10000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link up in 10G",
params->port);
} else if ((link_status & (1 << 1)) && (!(link_status & (1 << 14)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_2500;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 2.5G",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link up in 2.5G",
params->port);
} else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_1000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link up in 1G",
params->port);
} else {
link_up = 0;
- PMD_DRV_LOG(DEBUG, "port %x: External link is down",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link is down",
params->port);
}
@@ -7053,7 +7058,7 @@ static uint8_t elink_8073_read_status(struct elink_phy *phy,
* when it`s in 10G mode.
*/
if (vars->line_speed == ELINK_SPEED_1000) {
- PMD_DRV_LOG(DEBUG, "Swapping 1G polarity for"
+ PMD_DRV_LOG(DEBUG, sc, "Swapping 1G polarity for"
"the 8073");
val1 |= (1 << 3);
} else
@@ -7092,7 +7097,7 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy,
gpio_port = SC_PATH(sc);
else
gpio_port = params->port;
- PMD_DRV_LOG(DEBUG, "Setting 8073 port %d into low power mode",
+ PMD_DRV_LOG(DEBUG, sc, "Setting 8073 port %d into low power mode",
gpio_port);
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_LOW, gpio_port);
@@ -7107,7 +7112,7 @@ static uint8_t elink_8705_config_init(struct elink_phy *phy,
*vars)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "init 8705");
+ PMD_DRV_LOG(DEBUG, sc, "init 8705");
/* Restore normal power mode */
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
@@ -7135,21 +7140,21 @@ static uint8_t elink_8705_read_status(struct elink_phy *phy,
uint8_t link_up = 0;
uint16_t val1, rx_sd;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "read status 8705");
+ PMD_DRV_LOG(DEBUG, sc, "read status 8705");
elink_cl45_read(sc, phy,
MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1);
+ PMD_DRV_LOG(DEBUG, sc, "8705 LASI status 0x%x", val1);
elink_cl45_read(sc, phy,
MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "8705 LASI status 0x%x", val1);
+ PMD_DRV_LOG(DEBUG, sc, "8705 LASI status 0x%x", val1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, 0xc809, &val1);
- PMD_DRV_LOG(DEBUG, "8705 1.c809 val=0x%x", val1);
+ PMD_DRV_LOG(DEBUG, sc, "8705 1.c809 val=0x%x", val1);
link_up = ((rx_sd & 0x1) && (val1 & (1 << 9))
&& ((val1 & (1 << 8)) == 0));
if (link_up) {
@@ -7173,13 +7178,13 @@ static void elink_set_disable_pmd_transmit(struct elink_params *params,
if (pmd_dis) {
if (params->feature_config_flags &
ELINK_FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED) {
- PMD_DRV_LOG(DEBUG, "Disabling PMD transmitter");
+ PMD_DRV_LOG(DEBUG, sc, "Disabling PMD transmitter");
} else {
- PMD_DRV_LOG(DEBUG, "NOT disabling PMD transmitter");
+ PMD_DRV_LOG(DEBUG, sc, "NOT disabling PMD transmitter");
return;
}
} else {
- PMD_DRV_LOG(DEBUG, "Enabling PMD transmitter");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling PMD transmitter");
}
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_DISABLE, pmd_dis);
@@ -7213,7 +7218,7 @@ static void elink_sfp_e1e2_set_transmitter(struct elink_params *params,
offsetof(struct shmem_region,
dev_info.port_hw_config[port].sfp_ctrl)) &
PORT_HW_CFG_TX_LASER_MASK;
- PMD_DRV_LOG(DEBUG, "Setting transmitter tx_en=%x for port %x "
+ PMD_DRV_LOG(DEBUG, sc, "Setting transmitter tx_en=%x for port %x "
"mode = %x", tx_en, port, tx_en_mode);
switch (tx_en_mode) {
case PORT_HW_CFG_TX_LASER_MDIO:
@@ -7249,7 +7254,8 @@ static void elink_sfp_e1e2_set_transmitter(struct elink_params *params,
break;
}
default:
- PMD_DRV_LOG(DEBUG, "Invalid TX_LASER_MDIO 0x%x", tx_en_mode);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Invalid TX_LASER_MDIO 0x%x", tx_en_mode);
break;
}
}
@@ -7258,7 +7264,7 @@ static void elink_sfp_set_transmitter(struct elink_params *params,
struct elink_phy *phy, uint8_t tx_en)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Setting SFP+ transmitter to %d", tx_en);
+ PMD_DRV_LOG(DEBUG, sc, "Setting SFP+ transmitter to %d", tx_en);
if (CHIP_IS_E3(sc))
elink_sfp_e3_set_transmitter(params, phy, tx_en);
else
@@ -7279,7 +7285,7 @@ static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy,
uint16_t val = 0;
uint16_t i;
if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
- PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf");
+ PMD_DRV_LOG(DEBUG, sc, "Reading from eeprom is limited to 0xf");
return ELINK_STATUS_ERROR;
}
/* Set the read command byte count */
@@ -7310,7 +7316,7 @@ static elink_status_t elink_8726_read_sfp_module_eeprom(struct elink_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Got bad status 0x%x when reading from SFP+ EEPROM",
(val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
return ELINK_STATUS_ERROR;
@@ -7351,7 +7357,7 @@ static void elink_warpcore_power_module(struct elink_params *params,
if (pin_cfg == PIN_CFG_NA)
return;
- PMD_DRV_LOG(DEBUG, "Setting SFP+ module power to %d using pin cfg %d",
+ PMD_DRV_LOG(DEBUG, sc, "Setting SFP+ module power to %d using pin cfg %d",
power, pin_cfg);
/* Low ==> corresponding SFP+ module is powered
* high ==> the SFP+ module is powered down
@@ -7376,7 +7382,7 @@ static elink_status_t elink_warpcore_read_sfp_module_eeprom(__rte_unused struct
struct bnx2x_softc *sc = params->sc;
if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Reading from eeprom is limited to 16 bytes");
return ELINK_STATUS_ERROR;
}
@@ -7418,7 +7424,7 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
uint16_t val, i;
if (byte_cnt > ELINK_SFP_EEPROM_PAGE_SIZE) {
- PMD_DRV_LOG(DEBUG, "Reading from eeprom is limited to 0xf");
+ PMD_DRV_LOG(DEBUG, sc, "Reading from eeprom is limited to 0xf");
return ELINK_STATUS_ERROR;
}
@@ -7472,7 +7478,7 @@ static elink_status_t elink_8727_read_sfp_module_eeprom(struct elink_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Got bad status 0x%x when reading from SFP+ EEPROM",
(val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
return ELINK_STATUS_TIMEOUT;
@@ -7513,7 +7519,8 @@ static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy,
read_sfp_module_eeprom_func_p read_func;
if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
- PMD_DRV_LOG(DEBUG, "invalid dev_addr 0x%x", dev_addr);
+ PMD_DRV_LOG(DEBUG, params->sc,
+ "invalid dev_addr 0x%x", dev_addr);
return ELINK_STATUS_ERROR;
}
@@ -7559,7 +7566,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
ELINK_I2C_DEV_ADDR_A0,
ELINK_SFP_EEPROM_CON_TYPE_ADDR,
2, (uint8_t *) val) != 0) {
- PMD_DRV_LOG(DEBUG, "Failed to read from SFP+ module EEPROM");
+ PMD_DRV_LOG(DEBUG, sc, "Failed to read from SFP+ module EEPROM");
return ELINK_STATUS_ERROR;
}
@@ -7578,7 +7585,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
1,
&copper_module_type) !=
0) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Failed to read copper-cable-type"
" from SFP+ EEPROM");
return ELINK_STATUS_ERROR;
@@ -7586,7 +7593,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
if (copper_module_type &
ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Active Copper cable detected");
if (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
@@ -7596,11 +7603,11 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
} else if (copper_module_type &
ELINK_SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE)
{
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Passive Copper cable detected");
*edc_mode = ELINK_EDC_MODE_PASSIVE_DAC;
} else {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Unknown copper-cable-type 0x%x !!!",
copper_module_type);
return ELINK_STATUS_ERROR;
@@ -7613,7 +7620,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
if ((val[1] & (ELINK_SFP_EEPROM_COMP_CODE_SR_MASK |
ELINK_SFP_EEPROM_COMP_CODE_LR_MASK |
ELINK_SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
- PMD_DRV_LOG(DEBUG, "1G SFP module detected");
+ PMD_DRV_LOG(DEBUG, sc, "1G SFP module detected");
gport = params->port;
phy->media_type = ELINK_ETH_PHY_SFP_1G_FIBER;
if (phy->req_line_speed != ELINK_SPEED_1000) {
@@ -7629,7 +7636,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
}
} else {
int idx, cfg_idx = 0;
- PMD_DRV_LOG(DEBUG, "10G Optic module detected");
+ PMD_DRV_LOG(DEBUG, sc, "10G Optic module detected");
for (idx = ELINK_INT_PHY; idx < ELINK_MAX_PHYS; idx++) {
if (params->phy[idx].type == phy->type) {
cfg_idx = ELINK_LINK_CONFIG_IDX(idx);
@@ -7641,7 +7648,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
}
break;
default:
- PMD_DRV_LOG(DEBUG, "Unable to determine module type 0x%x !!!",
+ PMD_DRV_LOG(DEBUG, sc, "Unable to determine module type 0x%x !!!",
val[0]);
return ELINK_STATUS_ERROR;
}
@@ -7671,7 +7678,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
ELINK_SFP_EEPROM_OPTIONS_ADDR,
ELINK_SFP_EEPROM_OPTIONS_SIZE,
options) != 0) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Failed to read Option field from module EEPROM");
return ELINK_STATUS_ERROR;
}
@@ -7680,7 +7687,7 @@ static elink_status_t elink_get_edc_mode(struct elink_phy *phy,
else
*edc_mode = ELINK_EDC_MODE_LIMITING;
}
- PMD_DRV_LOG(DEBUG, "EDC mode is set to 0x%x", *edc_mode);
+ PMD_DRV_LOG(DEBUG, sc, "EDC mode is set to 0x%x", *edc_mode);
return ELINK_STATUS_OK;
}
@@ -7702,7 +7709,7 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
config));
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
- PMD_DRV_LOG(DEBUG, "NOT enforcing module verification");
+ PMD_DRV_LOG(DEBUG, sc, "NOT enforcing module verification");
return ELINK_STATUS_OK;
}
@@ -7714,21 +7721,21 @@ static elink_status_t elink_verify_sfp_module(struct elink_phy *phy,
ELINK_FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
/* Use first phy request only in case of non-dual media */
if (ELINK_DUAL_MEDIA(params)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"FW does not support OPT MDL verification");
return ELINK_STATUS_ERROR;
}
cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
} else {
/* No support in OPT MDL detection */
- PMD_DRV_LOG(DEBUG, "FW does not support OPT MDL verification");
+ PMD_DRV_LOG(DEBUG, sc, "FW does not support OPT MDL verification");
return ELINK_STATUS_ERROR;
}
fw_cmd_param = ELINK_FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
fw_resp = elink_cb_fw_command(sc, cmd, fw_cmd_param);
if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
- PMD_DRV_LOG(DEBUG, "Approved module");
+ PMD_DRV_LOG(DEBUG, sc, "Approved module");
return ELINK_STATUS_OK;
}
@@ -7784,7 +7791,7 @@ static elink_status_t elink_wait_for_sfp_module_initialized(struct elink_phy
ELINK_I2C_DEV_ADDR_A0,
1, 1, &val);
if (rc == 0) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, params->sc,
"SFP+ module initialization took %d ms",
timeout * 5);
return ELINK_STATUS_OK;
@@ -7835,17 +7842,18 @@ static elink_status_t elink_8726_set_limiting_mode(struct bnx2x_softc *sc,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2, &cur_limiting_mode);
- PMD_DRV_LOG(DEBUG, "Current Limiting mode is 0x%x", cur_limiting_mode);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Current Limiting mode is 0x%x", cur_limiting_mode);
if (edc_mode == ELINK_EDC_MODE_LIMITING) {
- PMD_DRV_LOG(DEBUG, "Setting LIMITING MODE");
+ PMD_DRV_LOG(DEBUG, sc, "Setting LIMITING MODE");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_ROM_VER2,
ELINK_EDC_MODE_LIMITING);
} else { /* LRM mode ( default ) */
- PMD_DRV_LOG(DEBUG, "Setting LRM MODE");
+ PMD_DRV_LOG(DEBUG, sc, "Setting LRM MODE");
/* Changing to LRM mode takes quite few seconds. So do it only
* if current mode is limiting (default is LRM)
@@ -7935,7 +7943,7 @@ static void elink_8727_specific_func(struct elink_phy *phy,
val);
break;
default:
- PMD_DRV_LOG(DEBUG, "Function 0x%x not supported by 8727",
+ PMD_DRV_LOG(DEBUG, sc, "Function 0x%x not supported by 8727",
action);
return;
}
@@ -7963,14 +7971,14 @@ static void elink_set_e1e2_module_fault_led(struct elink_params *params,
uint8_t gpio_port = elink_get_gpio_port(params);
uint16_t gpio_pin = fault_led_gpio -
PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
- PMD_DRV_LOG(DEBUG, "Set fault module-detected led "
+ PMD_DRV_LOG(DEBUG, sc, "Set fault module-detected led "
"pin %x port %x mode %x",
gpio_pin, gpio_port, gpio_mode);
elink_cb_gpio_write(sc, gpio_pin, gpio_mode, gpio_port);
}
break;
default:
- PMD_DRV_LOG(DEBUG, "Error: Invalid fault led mode 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Error: Invalid fault led mode 0x%x",
fault_led_gpio);
}
}
@@ -7986,7 +7994,7 @@ static void elink_set_e3_module_fault_led(struct elink_params *params,
dev_info.port_hw_config[port].e3_sfp_ctrl)) &
PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
- PMD_DRV_LOG(DEBUG, "Setting Fault LED to %d using pin cfg %d",
+ PMD_DRV_LOG(DEBUG, sc, "Setting Fault LED to %d using pin cfg %d",
gpio_mode, pin_cfg);
elink_set_cfg_pin(sc, pin_cfg, gpio_mode);
}
@@ -7995,7 +8003,8 @@ static void elink_set_sfp_module_fault_led(struct elink_params *params,
uint8_t gpio_mode)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Setting SFP+ module fault LED to %d", gpio_mode);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Setting SFP+ module fault LED to %d", gpio_mode);
if (CHIP_IS_E3(sc)) {
/* Low ==> if SFP+ module is supported otherwise
* High ==> if SFP+ module is not on the approved vendor list
@@ -8022,7 +8031,7 @@ static void elink_warpcore_hw_reset(__rte_unused struct elink_phy *phy,
static void elink_power_sfp_module(struct elink_params *params,
struct elink_phy *phy, uint8_t power)
{
- PMD_DRV_LOG(DEBUG, "Setting SFP+ power to %x", power);
+ PMD_DRV_LOG(DEBUG, params->sc, "Setting SFP+ power to %x", power);
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727:
@@ -8108,16 +8117,16 @@ static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
config));
/* Enabled transmitter by default */
elink_sfp_set_transmitter(params, phy, 1);
- PMD_DRV_LOG(DEBUG, "SFP+ module plugged in/out detected on port %d",
+ PMD_DRV_LOG(DEBUG, sc, "SFP+ module plugged in/out detected on port %d",
params->port);
/* Power up module */
elink_power_sfp_module(params, phy, 1);
if (elink_get_edc_mode(phy, params, &edc_mode) != 0) {
- PMD_DRV_LOG(DEBUG, "Failed to get valid module type");
+ PMD_DRV_LOG(DEBUG, sc, "Failed to get valid module type");
return ELINK_STATUS_ERROR;
} else if (elink_verify_sfp_module(phy, params) != 0) {
/* Check SFP+ module compatibility */
- PMD_DRV_LOG(DEBUG, "Module verification failed!!");
+ PMD_DRV_LOG(DEBUG, sc, "Module verification failed!!");
rc = ELINK_STATUS_ERROR;
/* Turn on fault module-detected led */
elink_set_sfp_module_fault_led(params,
@@ -8126,7 +8135,7 @@ static elink_status_t elink_sfp_module_detection(struct elink_phy *phy,
/* Check if need to power down the SFP+ module */
if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
- PMD_DRV_LOG(DEBUG, "Shutdown SFP+ module!!");
+ PMD_DRV_LOG(DEBUG, sc, "Shutdown SFP+ module!!");
elink_power_sfp_module(params, phy, 0);
return rc;
}
@@ -8167,7 +8176,7 @@ void elink_handle_module_detect_int(struct elink_params *params)
if (elink_get_mod_abs_int_cfg(sc, params->shmem_base,
params->port, &gpio_num, &gpio_port) ==
ELINK_STATUS_ERROR) {
- PMD_DRV_LOG(DEBUG, "Failed to get MOD_ABS interrupt config");
+ PMD_DRV_LOG(DEBUG, sc, "Failed to get MOD_ABS interrupt config");
return;
}
@@ -8207,7 +8216,7 @@ void elink_handle_module_detect_int(struct elink_params *params)
}
}
} else {
- PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized");
+ PMD_DRV_LOG(DEBUG, sc, "SFP+ module is not initialized");
}
} else {
elink_cb_gpio_int_write(sc, gpio_num,
@@ -8252,7 +8261,7 @@ static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
uint8_t link_up = 0;
uint16_t val1, val2, rx_sd, pcs_status;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "XGXS 8706/8726");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS 8706/8726");
/* Clear RX Alarm */
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
@@ -8262,7 +8271,8 @@ static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
/* Clear LASI indication */
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
- PMD_DRV_LOG(DEBUG, "8706/8726 LASI status 0x%x--> 0x%x", val1, val2);
+ PMD_DRV_LOG(DEBUG, sc,
+ "8706/8726 LASI status 0x%x--> 0x%x", val1, val2);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
elink_cl45_read(sc, phy,
@@ -8270,7 +8280,7 @@ static uint8_t elink_8706_8726_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
elink_cl45_read(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
- PMD_DRV_LOG(DEBUG, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+ PMD_DRV_LOG(DEBUG, sc, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
" link_status 0x%x", rx_sd, pcs_status, val2);
/* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
* are set, or if the autoneg bit 1 is set
@@ -8324,7 +8334,7 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
break;
DELAY(1000 * 10);
}
- PMD_DRV_LOG(DEBUG, "XGXS 8706 is initialized after %d ms", cnt);
+ PMD_DRV_LOG(DEBUG, sc, "XGXS 8706 is initialized after %d ms", cnt);
if ((params->feature_config_flags &
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
uint8_t i;
@@ -8338,14 +8348,14 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
val &= ~0x7;
/* Set control bits according to configuration */
val |= (phy->rx_preemphasis[i] & 0x7);
- PMD_DRV_LOG(DEBUG, "Setting RX Equalizer to BNX2X8706"
+ PMD_DRV_LOG(DEBUG, sc, "Setting RX Equalizer to BNX2X8706"
" reg 0x%x <-- val 0x%x", reg, val);
elink_cl45_write(sc, phy, MDIO_XS_DEVAD, reg, val);
}
}
/* Force speed */
if (phy->req_line_speed == ELINK_SPEED_10000) {
- PMD_DRV_LOG(DEBUG, "XGXS 8706 force 10Gbps");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS 8706 force 10Gbps");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
@@ -8359,7 +8369,7 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
/* Force 1Gbps using autoneg with 1G advertisement */
/* Allow CL37 through CL73 */
- PMD_DRV_LOG(DEBUG, "XGXS 8706 AutoNeg");
+ PMD_DRV_LOG(DEBUG, sc, "XGXS 8706 AutoNeg");
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
@@ -8394,7 +8404,7 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy,
& PORT_HW_CFG_TX_LASER_MASK;
if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
- PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling TXONOFF_PWRDN_DIS");
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL,
&tmp1);
@@ -8421,7 +8431,7 @@ static void elink_8726_config_loopback(struct elink_phy *phy,
struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "PMA/PMD ext_phy_loopback: 8726");
+ PMD_DRV_LOG(DEBUG, sc, "PMA/PMD ext_phy_loopback: 8726");
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
}
@@ -8473,7 +8483,7 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
&val1);
if (val1 & (1 << 15)) {
- PMD_DRV_LOG(DEBUG, "Tx is disabled");
+ PMD_DRV_LOG(DEBUG, sc, "Tx is disabled");
link_up = 0;
vars->line_speed = 0;
}
@@ -8486,7 +8496,7 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
struct elink_vars *vars)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726");
+ PMD_DRV_LOG(DEBUG, sc, "Initializing BNX2X8726");
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1 << 15);
elink_wait_reset_complete(sc, phy, params);
@@ -8501,7 +8511,7 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
elink_sfp_module_detection(phy, params);
if (phy->req_line_speed == ELINK_SPEED_1000) {
- PMD_DRV_LOG(DEBUG, "Setting 1G force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 1G force");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
elink_cl45_write(sc, phy,
@@ -8516,7 +8526,7 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
((phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- PMD_DRV_LOG(DEBUG, "Setting 1G clause37");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 1G clause37");
/* Set Flow control */
elink_ext_phy_set_pause(params, phy, vars);
elink_cl45_write(sc, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
@@ -8544,7 +8554,7 @@ static uint8_t elink_8726_config_init(struct elink_phy *phy,
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
phy->tx_preemphasis[0], phy->tx_preemphasis[1]);
elink_cl45_write(sc, phy,
@@ -8566,7 +8576,7 @@ static void elink_8726_link_reset(struct elink_phy *phy,
struct elink_params *params)
{
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "elink_8726_link_reset port %d", params->port);
+ PMD_DRV_LOG(DEBUG, sc, "elink_8726_link_reset port %d", params->port);
/* Set serial boot control for external load */
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
@@ -8639,14 +8649,14 @@ static void elink_8727_config_speed(struct elink_phy *phy,
/* Set option 1G speed */
if ((phy->req_line_speed == ELINK_SPEED_1000) ||
(phy->media_type == ELINK_ETH_PHY_SFP_1G_FIBER)) {
- PMD_DRV_LOG(DEBUG, "Setting 1G force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 1G force");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- PMD_DRV_LOG(DEBUG, "1.7 = 0x%x", tmp1);
+ PMD_DRV_LOG(DEBUG, sc, "1.7 = 0x%x", tmp1);
/* Power down the XAUI until link is up in case of dual-media
* and 1G
*/
@@ -8666,7 +8676,7 @@ static void elink_8727_config_speed(struct elink_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
- PMD_DRV_LOG(DEBUG, "Setting 1G clause37");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 1G clause37");
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
elink_cl45_write(sc, phy,
@@ -8700,7 +8710,7 @@ static uint8_t elink_8727_config_init(struct elink_phy *phy,
elink_wait_reset_complete(sc, phy, params);
- PMD_DRV_LOG(DEBUG, "Initializing BNX2X8727");
+ PMD_DRV_LOG(DEBUG, sc, "Initializing BNX2X8727");
elink_8727_specific_func(phy, params, ELINK_PHY_INIT);
/* Initially configure MOD_ABS to interrupt when module is
@@ -8733,7 +8743,7 @@ static uint8_t elink_8727_config_init(struct elink_phy *phy,
/* Set TX PreEmphasis if needed */
if ((params->feature_config_flags &
ELINK_FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
- PMD_DRV_LOG(DEBUG, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x",
phy->tx_preemphasis[0], phy->tx_preemphasis[1]);
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
@@ -8755,7 +8765,7 @@ static uint8_t elink_8727_config_init(struct elink_phy *phy,
if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
- PMD_DRV_LOG(DEBUG, "Enabling TXONOFF_PWRDN_DIS");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling TXONOFF_PWRDN_DIS");
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG,
&tmp2);
@@ -8787,7 +8797,7 @@ static void elink_8727_handle_mod_abs(struct elink_phy *phy,
if (mod_abs & (1 << 8)) {
/* Module is absent */
- PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is absent");
+ PMD_DRV_LOG(DEBUG, sc, "MOD_ABS indication show module is absent");
phy->media_type = ELINK_ETH_PHY_NOT_PRESENT;
/* 1. Set mod_abs to detect next module
* presence event
@@ -8812,7 +8822,7 @@ static void elink_8727_handle_mod_abs(struct elink_phy *phy,
} else {
/* Module is present */
- PMD_DRV_LOG(DEBUG, "MOD_ABS indication show module is present");
+ PMD_DRV_LOG(DEBUG, sc, "MOD_ABS indication show module is present");
/* First disable transmitter, and if the module is ok, the
* module_detection will enable it
* 1. Set mod_abs to detect next module absent event ( bit 8)
@@ -8843,14 +8853,14 @@ static void elink_8727_handle_mod_abs(struct elink_phy *phy,
if (elink_wait_for_sfp_module_initialized(phy, params) == 0) {
elink_sfp_module_detection(phy, params);
} else {
- PMD_DRV_LOG(DEBUG, "SFP+ module is not initialized");
+ PMD_DRV_LOG(DEBUG, sc, "SFP+ module is not initialized");
}
/* Reconfigure link speed based on module type limitations */
elink_8727_config_speed(phy, params);
}
- PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
+ PMD_DRV_LOG(DEBUG, sc, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
/* No need to check link status in case of module plugged in/out */
}
@@ -8873,14 +8883,14 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
vars->line_speed = 0;
- PMD_DRV_LOG(DEBUG, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
+ PMD_DRV_LOG(DEBUG, sc, "8727 RX_ALARM_STATUS 0x%x", rx_alarm_status);
elink_sfp_mask_fault(sc, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- PMD_DRV_LOG(DEBUG, "8727 LASI status 0x%x", val1);
+ PMD_DRV_LOG(DEBUG, sc, "8727 LASI status 0x%x", val1);
/* Clear MSG-OUT */
elink_cl45_read(sc, phy,
@@ -8898,7 +8908,7 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
if ((val1 & (1 << 8)) == 0) {
if (!CHIP_IS_E1x(sc))
oc_port = SC_PATH(sc) + (params->port << 1);
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"8727 Power fault has been detected on port %d",
oc_port);
elink_cb_event_log(sc, ELINK_LOG_ID_OVER_CURRENT, oc_port); //"Error: Power fault on Port %d has "
@@ -8941,10 +8951,10 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
}
if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) {
- PMD_DRV_LOG(DEBUG, "Enabling 8727 TX laser");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling 8727 TX laser");
elink_sfp_set_transmitter(params, phy, 1);
} else {
- PMD_DRV_LOG(DEBUG, "Tx is disabled");
+ PMD_DRV_LOG(DEBUG, sc, "Tx is disabled");
return 0;
}
@@ -8958,16 +8968,16 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
if ((link_status & (1 << 2)) && (!(link_status & (1 << 15)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_10000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 10G",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link up in 10G",
params->port);
} else if ((link_status & (1 << 0)) && (!(link_status & (1 << 13)))) {
link_up = 1;
vars->line_speed = ELINK_SPEED_1000;
- PMD_DRV_LOG(DEBUG, "port %x: External link up in 1G",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link up in 1G",
params->port);
} else {
link_up = 0;
- PMD_DRV_LOG(DEBUG, "port %x: External link is down",
+ PMD_DRV_LOG(DEBUG, sc, "port %x: External link is down",
params->port);
}
@@ -8987,7 +8997,7 @@ static uint8_t elink_8727_read_status(struct elink_phy *phy,
if (link_up) {
elink_ext_phy_resolve_fc(phy, params, vars);
vars->duplex = DUPLEX_FULL;
- PMD_DRV_LOG(DEBUG, "duplex = 0x%x", vars->duplex);
+ PMD_DRV_LOG(DEBUG, sc, "duplex = 0x%x", vars->duplex);
}
if ((ELINK_DUAL_MEDIA(params)) &&
@@ -9059,7 +9069,7 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
DELAY(5);
}
if (cnt == 100) {
- PMD_DRV_LOG(DEBUG, "Unable to read 848xx "
+ PMD_DRV_LOG(DEBUG, sc, "Unable to read 848xx "
"phy fw version(1)");
elink_save_spirom_version(sc, port, 0, phy->ver_addr);
return;
@@ -9076,7 +9086,7 @@ static void elink_save_848xx_spirom_version(struct elink_phy *phy,
DELAY(5);
}
if (cnt == 100) {
- PMD_DRV_LOG(DEBUG, "Unable to read 848xx phy fw "
+ PMD_DRV_LOG(DEBUG, sc, "Unable to read 848xx phy fw "
"version(2)");
elink_save_spirom_version(sc, port, 0, phy->ver_addr);
return;
@@ -9189,7 +9199,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
autoneg_val |= (1 << 9 | 1 << 12);
if (phy->req_duplex == DUPLEX_FULL)
an_1000_val |= (1 << 9);
- PMD_DRV_LOG(DEBUG, "Advertising 1G");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 1G");
} else
an_1000_val &= ~((1 << 8) | (1 << 9));
@@ -9205,7 +9215,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
*/
autoneg_val |= (1 << 9 | 1 << 12);
an_10_100_val |= (1 << 8);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-FD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 100M-FD");
}
if (phy->speed_cap_mask &
@@ -9214,7 +9224,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
*/
autoneg_val |= (1 << 9 | 1 << 12);
an_10_100_val |= (1 << 7);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-HD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 100M-HD");
}
if ((phy->speed_cap_mask &
@@ -9222,7 +9232,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(phy->supported & ELINK_SUPPORTED_10baseT_Full)) {
an_10_100_val |= (1 << 6);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-FD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 10M-FD");
}
if ((phy->speed_cap_mask &
@@ -9230,7 +9240,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(phy->supported & ELINK_SUPPORTED_10baseT_Half)) {
an_10_100_val |= (1 << 5);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-HD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 10M-HD");
}
}
@@ -9245,7 +9255,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(1 << 15 | 1 << 9 | 7 << 0));
/* The PHY needs this set even for forced link. */
an_10_100_val |= (1 << 8) | (1 << 7);
- PMD_DRV_LOG(DEBUG, "Setting 100M force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 100M force");
}
if ((phy->req_line_speed == ELINK_SPEED_10) &&
(phy->supported &
@@ -9254,7 +9264,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
elink_cl45_write(sc, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
(1 << 15 | 1 << 9 | 7 << 0));
- PMD_DRV_LOG(DEBUG, "Setting 10M force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 10M force");
}
elink_cl45_write(sc, phy,
@@ -9278,7 +9288,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy,
(phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
(phy->req_line_speed == ELINK_SPEED_10000)) {
- PMD_DRV_LOG(DEBUG, "Advertising 10G");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 10G");
/* Restart autoneg for 10G */
elink_cl45_read_or_write(sc, phy,
@@ -9334,7 +9344,7 @@ static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy,
DELAY(1000 * 1);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
- PMD_DRV_LOG(DEBUG, "FW cmd: FW not ready.");
+ PMD_DRV_LOG(DEBUG, sc, "FW cmd: FW not ready.");
return ELINK_STATUS_ERROR;
}
@@ -9356,7 +9366,7 @@ static elink_status_t elink_84833_cmd_hdlr(struct elink_phy *phy,
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
- PMD_DRV_LOG(DEBUG, "FW cmd failed.");
+ PMD_DRV_LOG(DEBUG, sc, "FW cmd failed.");
return ELINK_STATUS_ERROR;
}
/* Gather returning data */
@@ -9398,7 +9408,7 @@ static elink_status_t elink_84833_pair_swap_cfg(struct elink_phy *phy,
PHY84833_CMD_SET_PAIR_SWAP, data,
PHY84833_CMDHDLR_MAX_ARGS);
if (status == ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Pairswap OK, val=0x%x", data[1]);
+ PMD_DRV_LOG(DEBUG, sc, "Pairswap OK, val=0x%x", data[1]);
}
return status;
@@ -9474,7 +9484,8 @@ static void elink_84833_hw_reset_phy(struct elink_phy *phy,
elink_cb_gpio_mult_write(sc, reset_gpios,
MISC_REGISTERS_GPIO_OUTPUT_LOW);
DELAY(10);
- PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios);
+ PMD_DRV_LOG(DEBUG, sc,
+ "84833 hw reset on pin values 0x%x", reset_gpios);
}
static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
@@ -9484,13 +9495,13 @@ static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy,
elink_status_t rc;
uint16_t cmd_args = 0;
- PMD_DRV_LOG(DEBUG, "Don't Advertise 10GBase-T EEE");
+ PMD_DRV_LOG(DEBUG, params->sc, "Don't Advertise 10GBase-T EEE");
/* Prevent Phy from working in EEE and advertising it */
rc = elink_84833_cmd_hdlr(phy, params,
PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "EEE disable failed.");
+ PMD_DRV_LOG(DEBUG, params->sc, "EEE disable failed.");
return rc;
}
@@ -9507,7 +9518,7 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy,
rc = elink_84833_cmd_hdlr(phy, params,
PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "EEE enable failed.");
+ PMD_DRV_LOG(DEBUG, params->sc, "EEE enable failed.");
return rc;
}
@@ -9600,7 +9611,7 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
elink_cl45_write(sc, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, val);
- PMD_DRV_LOG(DEBUG, "Multi_phy config = 0x%x, Media control = 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Multi_phy config = 0x%x, Media control = 0x%x",
params->multi_phy_config, val);
if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X84833) ||
@@ -9616,7 +9627,7 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
PHY84833_CMD_SET_EEE_MODE, cmd_args,
PHY84833_CMDHDLR_MAX_ARGS);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Cfg AutogrEEEn failed.");
+ PMD_DRV_LOG(DEBUG, sc, "Cfg AutogrEEEn failed.");
}
}
if (initialize) {
@@ -9653,7 +9664,7 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
elink_eee_has_cap(params)) {
rc = elink_eee_initial_config(params, vars, SHMEM_EEE_10G_ADV);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers");
+ PMD_DRV_LOG(DEBUG, sc, "Failed to configure EEE timers");
elink_8483x_disable_eee(phy, params, vars);
return rc;
}
@@ -9666,7 +9677,7 @@ static uint8_t elink_848x3_config_init(struct elink_phy *phy,
else
rc = elink_8483x_disable_eee(phy, params, vars);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Failed to set EEE advertisement");
+ PMD_DRV_LOG(DEBUG, sc, "Failed to set EEE advertisement");
return rc;
}
} else {
@@ -9698,7 +9709,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
elink_cl45_read(sc, phy, MDIO_AN_DEVAD, 0xFFFA, &val1);
elink_cl45_read(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL, &val2);
- PMD_DRV_LOG(DEBUG, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2);
+ PMD_DRV_LOG(DEBUG, sc, "BNX2X848xx: PMD_SIGNAL 1.a811 = 0x%x", val2);
/* Check link 10G */
if (val2 & (1 << 11)) {
@@ -9720,7 +9731,8 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
&legacy_status);
- PMD_DRV_LOG(DEBUG, "Legacy speed status = 0x%x", legacy_status);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Legacy speed status = 0x%x", legacy_status);
link_up = ((legacy_status & (1 << 11)) == (1 << 11));
legacy_speed = (legacy_status & (3 << 9));
if (legacy_speed == (0 << 9))
@@ -9750,7 +9762,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
else
vars->duplex = DUPLEX_HALF;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Link is up in %dMbps, is_duplex_full= %d",
vars->line_speed,
(vars->duplex == DUPLEX_FULL));
@@ -9772,7 +9784,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy,
}
}
if (link_up) {
- PMD_DRV_LOG(DEBUG, "BNX2X848x3: link speed is %d",
+ PMD_DRV_LOG(DEBUG, sc, "BNX2X848x3: link speed is %d",
vars->line_speed);
elink_ext_phy_resolve_fc(phy, params, vars);
@@ -9889,7 +9901,7 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
switch (mode) {
case ELINK_LED_MODE_OFF:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OFF", port);
+ PMD_DRV_LOG(DEBUG, sc, "Port 0x%x: LED MODE OFF", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -9919,7 +9931,8 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
break;
case ELINK_LED_MODE_FRONT_PANEL_OFF:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE FRONT PANEL OFF", port);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Port 0x%x: LED MODE FRONT PANEL OFF", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -9969,7 +9982,7 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
break;
case ELINK_LED_MODE_ON:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE ON", port);
+ PMD_DRV_LOG(DEBUG, sc, "Port 0x%x: LED MODE ON", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -10029,7 +10042,7 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
case ELINK_LED_MODE_OPER:
- PMD_DRV_LOG(DEBUG, "Port 0x%x: LED MODE OPER", port);
+ PMD_DRV_LOG(DEBUG, sc, "Port 0x%x: LED MODE OPER", port);
if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
SHARED_HW_CFG_LED_EXTPHY1) {
@@ -10044,7 +10057,7 @@ static void elink_848xx_set_link_led(struct elink_phy *phy,
>>
MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT))
{
- PMD_DRV_LOG(DEBUG, "Setting LINK_SIGNAL");
+ PMD_DRV_LOG(DEBUG, sc, "Setting LINK_SIGNAL");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL,
@@ -10157,7 +10170,7 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
uint16_t autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
uint32_t cfg_pin;
- PMD_DRV_LOG(DEBUG, "54618SE cfg init");
+ PMD_DRV_LOG(DEBUG, sc, "54618SE cfg init");
DELAY(1000 * 1);
/* This works with E3 only, no need to check the chip
@@ -10230,7 +10243,7 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
autoneg_val |= (1 << 9 | 1 << 12);
if (phy->req_duplex == DUPLEX_FULL)
an_1000_val |= (1 << 9);
- PMD_DRV_LOG(DEBUG, "Advertising 1G");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 1G");
} else
an_1000_val &= ~((1 << 8) | (1 << 9));
@@ -10243,25 +10256,25 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
an_10_100_val |= (1 << 5);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-HD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 10M-HD");
}
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
an_10_100_val |= (1 << 6);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 10M-FD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 10M-FD");
}
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
an_10_100_val |= (1 << 7);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-HD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 100M-HD");
}
if (phy->speed_cap_mask &
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
an_10_100_val |= (1 << 8);
autoneg_val |= (1 << 9 | 1 << 12);
- PMD_DRV_LOG(DEBUG, "Advertising 100M-FD");
+ PMD_DRV_LOG(DEBUG, sc, "Advertising 100M-FD");
}
}
@@ -10270,12 +10283,12 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
autoneg_val |= (1 << 13);
/* Enabled AUTO-MDIX when autoneg is disabled */
elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
- PMD_DRV_LOG(DEBUG, "Setting 100M force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 100M force");
}
if (phy->req_line_speed == ELINK_SPEED_10) {
/* Enabled AUTO-MDIX when autoneg is disabled */
elink_cl22_write(sc, phy, 0x18, (1 << 15 | 1 << 9 | 7 << 0));
- PMD_DRV_LOG(DEBUG, "Setting 10M force");
+ PMD_DRV_LOG(DEBUG, sc, "Setting 10M force");
}
if ((phy->flags & ELINK_FLAGS_EEE) && elink_eee_has_cap(params)) {
@@ -10290,7 +10303,7 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
rc = elink_eee_initial_config(params, vars, SHMEM_EEE_1G_ADV);
if (rc != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Failed to configure EEE timers");
+ PMD_DRV_LOG(DEBUG, sc, "Failed to configure EEE timers");
elink_eee_disable(phy, params, vars);
} else if ((params->eee_mode & ELINK_EEE_MODE_ADV_LPI) &&
(phy->req_duplex == DUPLEX_FULL) &&
@@ -10304,7 +10317,7 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
elink_eee_advertise(phy, params, vars,
SHMEM_EEE_1G_ADV);
} else {
- PMD_DRV_LOG(DEBUG, "Don't Advertise 1GBase-T EEE");
+ PMD_DRV_LOG(DEBUG, sc, "Don't Advertise 1GBase-T EEE");
elink_eee_disable(phy, params, vars);
}
} else {
@@ -10316,10 +10329,10 @@ static uint8_t elink_54618se_config_init(struct elink_phy *phy,
if (params->feature_config_flags &
ELINK_FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
temp = 6;
- PMD_DRV_LOG(DEBUG, "Enabling Auto-GrEEEn");
+ PMD_DRV_LOG(DEBUG, sc, "Enabling Auto-GrEEEn");
} else {
temp = 0;
- PMD_DRV_LOG(DEBUG, "Don't Adv. EEE");
+ PMD_DRV_LOG(DEBUG, sc, "Don't Adv. EEE");
}
elink_cl45_write(sc, phy, MDIO_AN_DEVAD,
MDIO_AN_REG_EEE_ADV, temp);
@@ -10347,7 +10360,7 @@ static void elink_5461x_set_link_led(struct elink_phy *phy,
elink_cl22_read(sc, phy, MDIO_REG_GPHY_SHADOW, &temp);
temp &= 0xff00;
- PMD_DRV_LOG(DEBUG, "54618x set link led (mode=%x)", mode);
+ PMD_DRV_LOG(DEBUG, sc, "54618x set link led (mode=%x)", mode);
switch (mode) {
case ELINK_LED_MODE_FRONT_PANEL_OFF:
case ELINK_LED_MODE_OFF:
@@ -10405,7 +10418,7 @@ static uint8_t elink_54618se_read_status(struct elink_phy *phy,
/* Get speed operation status */
elink_cl22_read(sc, phy, MDIO_REG_GPHY_AUX_STATUS, &legacy_status);
- PMD_DRV_LOG(DEBUG, "54618SE read_status: 0x%x", legacy_status);
+ PMD_DRV_LOG(DEBUG, sc, "54618SE read_status: 0x%x", legacy_status);
/* Read status to clear the PHY interrupt. */
elink_cl22_read(sc, phy, MDIO_REG_INTR_STATUS, &val);
@@ -10437,7 +10450,7 @@ static uint8_t elink_54618se_read_status(struct elink_phy *phy,
} else /* Should not happen */
vars->line_speed = 0;
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Link is up in %dMbps, is_duplex_full= %d",
vars->line_speed, (vars->duplex == DUPLEX_FULL));
@@ -10451,7 +10464,7 @@ static uint8_t elink_54618se_read_status(struct elink_phy *phy,
vars->link_status |=
LINK_STATUS_PARALLEL_DETECTION_USED;
- PMD_DRV_LOG(DEBUG, "BNX2X54618SE: link speed is %d",
+ PMD_DRV_LOG(DEBUG, sc, "BNX2X54618SE: link speed is %d",
vars->line_speed);
elink_ext_phy_resolve_fc(phy, params, vars);
@@ -10499,7 +10512,7 @@ static void elink_54618se_config_loopback(struct elink_phy *phy,
uint16_t val;
uint32_t umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
- PMD_DRV_LOG(DEBUG, "2PMA/PMD ext_phy_loopback: 54618se");
+ PMD_DRV_LOG(DEBUG, sc, "2PMA/PMD ext_phy_loopback: 54618se");
/* Enable master/slave manual mmode and set to master */
/* mii write 9 [bits set 11 12] */
@@ -10550,7 +10563,7 @@ static uint8_t elink_7101_config_init(struct elink_phy *phy,
{
uint16_t fw_ver1, fw_ver2, val;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LASI indication");
+ PMD_DRV_LOG(DEBUG, sc, "Setting the SFX7101 LASI indication");
/* Restore normal power mode */
elink_cb_gpio_write(sc, MISC_REGISTERS_GPIO_2,
@@ -10560,7 +10573,7 @@ static uint8_t elink_7101_config_init(struct elink_phy *phy,
elink_wait_reset_complete(sc, phy, params);
elink_cl45_write(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
- PMD_DRV_LOG(DEBUG, "Setting the SFX7101 LED to blink on traffic");
+ PMD_DRV_LOG(DEBUG, sc, "Setting the SFX7101 LED to blink on traffic");
elink_cl45_write(sc, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1 << 3));
@@ -10591,10 +10604,10 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
uint16_t val1, val2;
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
- PMD_DRV_LOG(DEBUG, "10G-base-T LASI status 0x%x->0x%x", val2, val1);
+ PMD_DRV_LOG(DEBUG, sc, "10G-base-T LASI status 0x%x->0x%x", val2, val1);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
elink_cl45_read(sc, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
- PMD_DRV_LOG(DEBUG, "10G-base-T PMA status 0x%x->0x%x", val2, val1);
+ PMD_DRV_LOG(DEBUG, sc, "10G-base-T PMA status 0x%x->0x%x", val2, val1);
link_up = ((val1 & 4) == 4);
/* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
@@ -10603,7 +10616,7 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy,
&val2);
vars->line_speed = ELINK_SPEED_10000;
vars->duplex = DUPLEX_FULL;
- PMD_DRV_LOG(DEBUG, "SFX7101 AN status 0x%x->Master=%x",
+ PMD_DRV_LOG(DEBUG, sc, "SFX7101 AN status 0x%x->Master=%x",
val2, (val2 & (1 << 14)));
elink_ext_phy_10G_an_resolve(sc, phy, vars);
elink_ext_phy_resolve_fc(phy, params, vars);
@@ -11210,7 +11223,7 @@ static uint32_t elink_get_ext_phy_config(struct bnx2x_softc *sc,
external_phy_config2));
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid phy_index %d", phy_index);
+ PMD_DRV_LOG(DEBUG, sc, "Invalid phy_index %d", phy_index);
return ELINK_STATUS_ERROR;
}
@@ -11233,7 +11246,7 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
(REG_RD(sc, MISC_REG_CHIP_NUM) << 16) |
((REG_RD(sc, MISC_REG_CHIP_REV) & 0xf) << 12);
- PMD_DRV_LOG(DEBUG, ":chip_id = 0x%x", chip_id);
+ PMD_DRV_LOG(DEBUG, sc, ":chip_id = 0x%x", chip_id);
if (USES_WARPCORE(sc)) {
uint32_t serdes_net_if;
phy_addr = REG_RD(sc, MISC_REG_WC0_CTRL_PHY_ADDR);
@@ -11310,7 +11323,7 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
phy->flags &= ~ELINK_FLAGS_TX_ERROR_CHECK;
break;
default:
- PMD_DRV_LOG(DEBUG, "Unknown WC interface type 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Unknown WC interface type 0x%x",
serdes_net_if);
break;
}
@@ -11338,7 +11351,7 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
*phy = phy_xgxs;
break;
default:
- PMD_DRV_LOG(DEBUG, "Invalid switch_cfg");
+ PMD_DRV_LOG(DEBUG, sc, "Invalid switch_cfg");
return ELINK_STATUS_ERROR;
}
}
@@ -11351,7 +11364,7 @@ static elink_status_t elink_populate_int_phy(struct bnx2x_softc *sc,
else
phy->def_md_devad = ELINK_DEFAULT_PHY_DEV_ADDR;
- PMD_DRV_LOG(DEBUG, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x",
port, phy->addr, phy->mdio_ctrl);
elink_populate_preemphasis(sc, shmem_base, phy, port, ELINK_INT_PHY);
@@ -11478,9 +11491,9 @@ static elink_status_t elink_populate_ext_phy(struct bnx2x_softc *sc,
ELINK_SUPPORTED_100baseT_Full);
}
- PMD_DRV_LOG(DEBUG, "phy_type 0x%x port %d found in index %d",
+ PMD_DRV_LOG(DEBUG, sc, "phy_type 0x%x port %d found in index %d",
phy_type, port, phy_index);
- PMD_DRV_LOG(DEBUG, " addr=0x%x, mdio_ctl=0x%x",
+ PMD_DRV_LOG(DEBUG, sc, " addr=0x%x, mdio_ctl=0x%x",
phy->addr, phy->mdio_ctrl);
return ELINK_STATUS_OK;
}
@@ -11529,7 +11542,7 @@ static void elink_phy_def_cfg(struct elink_params *params,
speed_capability_mask));
}
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"Default config phy idx %x cfg 0x%x speed_cap_mask 0x%x",
phy_index, link_config, phy->speed_cap_mask);
@@ -11620,7 +11633,7 @@ elink_status_t elink_phy_probe(struct elink_params * params)
struct bnx2x_softc *sc = params->sc;
struct elink_phy *phy;
params->num_phys = 0;
- PMD_DRV_LOG(DEBUG, "Begin phy probe");
+ PMD_DRV_LOG(DEBUG, sc, "Begin phy probe");
phy_config_swapped = params->multi_phy_config &
PORT_HW_CFG_PHY_SWAPPED_ENABLED;
@@ -11633,7 +11646,7 @@ elink_status_t elink_phy_probe(struct elink_params * params)
else if (phy_index == ELINK_EXT_PHY2)
actual_phy_idx = ELINK_EXT_PHY1;
}
- PMD_DRV_LOG(DEBUG, "phy_config_swapped %x, phy_index %x,"
+ PMD_DRV_LOG(DEBUG, sc, "phy_config_swapped %x, phy_index %x,"
" actual_phy_idx %x", phy_config_swapped,
phy_index, actual_phy_idx);
phy = &params->phy[actual_phy_idx];
@@ -11641,7 +11654,7 @@ elink_status_t elink_phy_probe(struct elink_params * params)
params->shmem2_base, params->port,
phy) != ELINK_STATUS_OK) {
params->num_phys = 0;
- PMD_DRV_LOG(DEBUG, "phy probe failed in phy index %d",
+ PMD_DRV_LOG(DEBUG, sc, "phy probe failed in phy index %d",
phy_index);
for (phy_index = ELINK_INT_PHY;
phy_index < ELINK_MAX_PHYS; phy_index++)
@@ -11682,7 +11695,8 @@ elink_status_t elink_phy_probe(struct elink_params * params)
params->num_phys++;
}
- PMD_DRV_LOG(DEBUG, "End phy probe. #phys found %x", params->num_phys);
+ PMD_DRV_LOG(DEBUG, sc,
+ "End phy probe. #phys found %x", params->num_phys);
return ELINK_STATUS_OK;
}
@@ -11852,7 +11866,7 @@ static elink_status_t elink_avoid_link_flap(struct elink_params *params,
for (phy_idx = ELINK_INT_PHY; phy_idx < params->num_phys; phy_idx++) {
struct elink_phy *phy = &params->phy[phy_idx];
if (phy->phy_specific_func) {
- PMD_DRV_LOG(DEBUG, "Calling PHY specific func");
+ PMD_DRV_LOG(DEBUG, sc, "Calling PHY specific func");
phy->phy_specific_func(phy, params, ELINK_PHY_INIT);
}
if ((phy->media_type == ELINK_ETH_PHY_SFPP_10G_FIBER) ||
@@ -11972,12 +11986,13 @@ elink_status_t elink_phy_init(struct elink_params *params,
{
int lfa_status;
struct bnx2x_softc *sc = params->sc;
- PMD_DRV_LOG(DEBUG, "Phy Initialization started");
- PMD_DRV_LOG(DEBUG, "(1) req_speed %d, req_flowctrl %d",
+ PMD_DRV_LOG(DEBUG, sc, "Phy Initialization started");
+ PMD_DRV_LOG(DEBUG, sc, "(1) req_speed %d, req_flowctrl %d",
params->req_line_speed[0], params->req_flow_ctrl[0]);
- PMD_DRV_LOG(DEBUG, "(2) req_speed %d, req_flowctrl %d",
+ PMD_DRV_LOG(DEBUG, sc, "(2) req_speed %d, req_flowctrl %d",
params->req_line_speed[1], params->req_flow_ctrl[1]);
- PMD_DRV_LOG(DEBUG, "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv);
+ PMD_DRV_LOG(DEBUG, sc,
+ "req_adv_flow_ctrl 0x%x", params->req_fc_auto_adv);
vars->link_status = 0;
vars->phy_link_up = 0;
vars->link_up = 0;
@@ -11994,11 +12009,13 @@ elink_status_t elink_phy_init(struct elink_params *params,
lfa_status = elink_check_lfa(params);
if (lfa_status == 0) {
- PMD_DRV_LOG(DEBUG, "Link Flap Avoidance in progress");
+ PMD_DRV_LOG(DEBUG, sc,
+ "Link Flap Avoidance in progress");
return elink_avoid_link_flap(params, vars);
}
- PMD_DRV_LOG(DEBUG, "Cannot avoid link flap lfa_sta=0x%x", lfa_status);
+ PMD_DRV_LOG(DEBUG, sc,
+ "Cannot avoid link flap lfa_sta=0x%x", lfa_status);
elink_cannot_avoid_link_flap(params, vars, lfa_status);
/* Disable attentions */
@@ -12014,12 +12031,12 @@ elink_status_t elink_phy_init(struct elink_params *params,
vars->link_status |= LINK_STATUS_PFC_ENABLED;
if ((params->num_phys == 0) && !CHIP_REV_IS_SLOW(sc)) {
- PMD_DRV_LOG(DEBUG, "No phy found for initialization !!");
+ PMD_DRV_LOG(DEBUG, sc, "No phy found for initialization !!");
return ELINK_STATUS_ERROR;
}
set_phy_vars(params, vars);
- PMD_DRV_LOG(DEBUG, "Num of phys on board: %d", params->num_phys);
+ PMD_DRV_LOG(DEBUG, sc, "Num of phys on board: %d", params->num_phys);
switch (params->loopback_mode) {
case ELINK_LOOPBACK_BMAC:
@@ -12062,7 +12079,7 @@ static elink_status_t elink_link_reset(struct elink_params *params,
{
struct bnx2x_softc *sc = params->sc;
uint8_t phy_index, port = params->port, clear_latch_ind = 0;
- PMD_DRV_LOG(DEBUG, "Resetting the link of port %d", port);
+ PMD_DRV_LOG(DEBUG, sc, "Resetting the link of port %d", port);
/* Disable attentions */
vars->link_status = 0;
elink_update_mng(params, vars->link_status);
@@ -12240,7 +12257,7 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port_of_path, &phy[port]) !=
ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate_phy failed");
+ PMD_DRV_LOG(DEBUG, sc, "populate_phy failed");
return ELINK_STATUS_ERROR;
}
/* Disable attentions */
@@ -12280,7 +12297,7 @@ static elink_status_t elink_8073_common_init_phy(struct bnx2x_softc *sc,
else
port_of_path = 0;
- PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Loading spirom for phy address 0x%x",
phy_blk[port]->addr);
if (elink_8073_8727_external_rom_boot(sc, phy_blk[port],
port_of_path))
@@ -12364,7 +12381,7 @@ static elink_status_t elink_8726_common_init_phy(struct bnx2x_softc *sc,
/* Extract the ext phy address for the port */
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port, &phy) != ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ PMD_DRV_LOG(DEBUG, sc, "populate phy failed");
return ELINK_STATUS_ERROR;
}
@@ -12484,7 +12501,7 @@ static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port_of_path, &phy[port]) !=
ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ PMD_DRV_LOG(DEBUG, sc, "populate phy failed");
return ELINK_STATUS_ERROR;
}
/* disable attentions */
@@ -12515,7 +12532,7 @@ static elink_status_t elink_8727_common_init_phy(struct bnx2x_softc *sc,
port_of_path = port;
else
port_of_path = 0;
- PMD_DRV_LOG(DEBUG, "Loading spirom for phy address 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Loading spirom for phy address 0x%x",
phy_blk[port]->addr);
if (elink_8073_8727_external_rom_boot(sc, phy_blk[port],
port_of_path))
@@ -12542,7 +12559,8 @@ static elink_status_t elink_84833_common_init_phy(struct bnx2x_softc *sc,
DELAY(10);
elink_cb_gpio_mult_write(sc, reset_gpios,
MISC_REGISTERS_GPIO_OUTPUT_HIGH);
- PMD_DRV_LOG(DEBUG, "84833 reset pulse on pin values 0x%x", reset_gpios);
+ PMD_DRV_LOG(DEBUG, sc,
+ "84833 reset pulse on pin values 0x%x", reset_gpios);
return ELINK_STATUS_OK;
}
@@ -12590,7 +12608,7 @@ static elink_status_t elink_ext_phy_common_init(struct bnx2x_softc *sc,
rc = ELINK_STATUS_ERROR;
break;
default:
- PMD_DRV_LOG(DEBUG,
+ PMD_DRV_LOG(DEBUG, sc,
"ext_phy 0x%x common init not required",
ext_phy_type);
break;
@@ -12616,7 +12634,7 @@ elink_status_t elink_common_init_phy(struct bnx2x_softc * sc,
elink_set_mdio_clk(sc, GRCBASE_EMAC0);
elink_set_mdio_clk(sc, GRCBASE_EMAC1);
- PMD_DRV_LOG(DEBUG, "Begin common phy init");
+ PMD_DRV_LOG(DEBUG, sc, "Begin common phy init");
if (CHIP_IS_E3(sc)) {
/* Enable EPIO */
val = REG_RD(sc, MISC_REG_GEN_PURP_HWG);
@@ -12627,7 +12645,7 @@ elink_status_t elink_common_init_phy(struct bnx2x_softc * sc,
offsetof(struct shmem_region,
port_mb[PORT_0].ext_phy_fw_version));
if (phy_ver) {
- PMD_DRV_LOG(DEBUG, "Not doing common init; phy ver is 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Not doing common init; phy ver is 0x%x",
phy_ver);
return ELINK_STATUS_OK;
}
@@ -12699,15 +12717,15 @@ static uint8_t elink_analyze_link_error(struct elink_params *params,
/* If values differ */
switch (phy_flag) {
case PHY_HALF_OPEN_CONN_FLAG:
- PMD_DRV_LOG(DEBUG, "Analyze Remote Fault");
+ PMD_DRV_LOG(DEBUG, sc, "Analyze Remote Fault");
break;
case PHY_SFP_TX_FAULT_FLAG:
- PMD_DRV_LOG(DEBUG, "Analyze TX Fault");
+ PMD_DRV_LOG(DEBUG, sc, "Analyze TX Fault");
break;
default:
- PMD_DRV_LOG(DEBUG, "Analyze UNKNOWN");
+ PMD_DRV_LOG(DEBUG, sc, "Analyze UNKNOWN");
}
- PMD_DRV_LOG(DEBUG, "Link changed:[%x %x]->%x", vars->link_up,
+ PMD_DRV_LOG(DEBUG, sc, "Link changed:[%x %x]->%x", vars->link_up,
old_status, status);
/* a. Update shmem->link_status accordingly
@@ -12833,7 +12851,7 @@ static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
PORT_HW_CFG_E3_TX_FAULT_SHIFT;
if (elink_get_cfg_pin(sc, cfg_pin, &value)) {
- PMD_DRV_LOG(DEBUG, "Failed to read pin 0x%02x", cfg_pin);
+ PMD_DRV_LOG(DEBUG, sc, "Failed to read pin 0x%02x", cfg_pin);
return;
}
@@ -12855,7 +12873,7 @@ static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
/* If module is unapproved, led should be on regardless */
if (!(phy->flags & ELINK_FLAGS_SFP_NOT_APPROVED)) {
- PMD_DRV_LOG(DEBUG, "Change TX_Fault LED: ->%x",
+ PMD_DRV_LOG(DEBUG, sc, "Change TX_Fault LED: ->%x",
led_mode);
elink_set_e3_module_fault_led(params, led_mode);
}
@@ -12865,7 +12883,7 @@ static void elink_sfp_tx_fault_detection(struct elink_phy *phy,
static void elink_kr2_recovery(struct elink_params *params,
struct elink_vars *vars, struct elink_phy *phy)
{
- PMD_DRV_LOG(DEBUG, "KR2 recovery");
+ PMD_DRV_LOG(DEBUG, params->sc, "KR2 recovery");
elink_warpcore_enable_AN_KR2(phy, params, vars);
elink_warpcore_restart_AN_KR(phy, params);
@@ -12892,7 +12910,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
if (!sigdet) {
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
elink_kr2_recovery(params, vars, phy);
- PMD_DRV_LOG(DEBUG, "No sigdet");
+ PMD_DRV_LOG(DEBUG, sc, "No sigdet");
}
return;
}
@@ -12910,7 +12928,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
if (base_page == 0) {
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
elink_kr2_recovery(params, vars, phy);
- PMD_DRV_LOG(DEBUG, "No BP");
+ PMD_DRV_LOG(DEBUG, sc, "No BP");
}
return;
}
@@ -12926,7 +12944,7 @@ static void elink_check_kr2_wa(struct elink_params *params,
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
if (!not_kr2_device) {
- PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page,
+ PMD_DRV_LOG(DEBUG, sc, "BP=0x%x, NP=0x%x", base_page,
next_page);
elink_kr2_recovery(params, vars, phy);
}
@@ -12935,7 +12953,8 @@ static void elink_check_kr2_wa(struct elink_params *params,
/* KR2 is enabled, but not KR2 device */
if (not_kr2_device) {
/* Disable KR2 on both lanes */
- PMD_DRV_LOG(DEBUG, "BP=0x%x, NP=0x%x", base_page, next_page);
+ PMD_DRV_LOG(DEBUG, sc,
+ "BP=0x%x, NP=0x%x", base_page, next_page);
elink_disable_kr2(params, vars, phy);
/* Restart AN on leading lane */
elink_warpcore_restart_AN_KR(phy, params);
@@ -12952,7 +12971,7 @@ void elink_period_func(struct elink_params *params, struct elink_vars *vars)
elink_set_aer_mmd(params, &params->phy[phy_idx]);
if (elink_check_half_open_conn(params, vars, 1) !=
ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "Fault detection failed");
+ PMD_DRV_LOG(DEBUG, sc, "Fault detection failed");
}
break;
}
@@ -12998,7 +13017,7 @@ uint8_t elink_fan_failure_det_req(struct bnx2x_softc *sc,
if (elink_populate_phy(sc, phy_index, shmem_base, shmem2_base,
port, &phy)
!= ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ PMD_DRV_LOG(DEBUG, sc, "populate phy failed");
return 0;
}
fan_failure_det_req |= (phy.flags &
@@ -13048,7 +13067,7 @@ void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
if (elink_populate_phy(sc, phy_index, shmem_base,
shmem2_base, port, &phy)
!= ELINK_STATUS_OK) {
- PMD_DRV_LOG(DEBUG, "populate phy failed");
+ PMD_DRV_LOG(DEBUG, sc, "populate phy failed");
return;
}
if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8726) {
@@ -13078,7 +13097,7 @@ void elink_init_mod_abs_int(struct bnx2x_softc *sc, struct elink_vars *vars,
dev_info.port_hw_config[port].aeu_int_mask);
REG_WR(sc, sync_offset, vars->aeu_int_mask);
- PMD_DRV_LOG(DEBUG, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x",
+ PMD_DRV_LOG(DEBUG, sc, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x",
gpio_num, gpio_port, vars->aeu_int_mask);
if (port == 0)
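
All of the elink.c hunks above make the same mechanical change: PMD_DRV_LOG gains the adapter context (sc, or params->sc where no local sc is in scope) so that each message can be attributed to a specific device instance. The real macro lives in bnx2x_logs.h and is not shown in this section; the fragment below is only a minimal stand-alone sketch of a device-scoped log macro, with the context type and field invented for illustration.

#include <stdio.h>

/* Hypothetical per-device context; the real driver passes its bnx2x softc. */
struct dev_ctx {
        unsigned int port_id;
};

/* Device-scoped debug log: every message carries the port id of the
 * context handed in as the second argument. */
#define DRV_LOG(level, ctx, fmt, ...) \
        fprintf(stderr, "%s: port %u: " fmt "\n", #level, \
                (ctx)->port_id, ##__VA_ARGS__)

int main(void)
{
        struct dev_ctx ctx = { .port_id = 3 };

        DRV_LOG(DEBUG, &ctx, "External link up in %uG", 10u);
        return 0;
}

Threading the context through explicitly is what lets helpers that only receive params log via params->sc, as several hunks above do.
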
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 7466a642..10911214 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -204,7 +204,9 @@ static int bnxt_init_chip(struct bnxt *bp)
unsigned int i, rss_idx, fw_idx;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
uint32_t intr_vector = 0;
uint32_t queue_id, base = BNXT_MISC_VEC_ID;
uint32_t vec = BNXT_MISC_VEC_ID;
@@ -275,6 +277,16 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
+ /*
+ * Firmware sets pf pair in default vnic cfg. If the VLAN strip
+ * setting is not available at this time, it will not be
+ * configured correctly in the CFA.
+ */
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ vnic->vlan_strip = true;
+ else
+ vnic->vlan_strip = false;
+
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
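
The added bnxt block copies the VLAN-strip request out of the port's Rx offload flags into the VNIC before bnxt_hwrm_vnic_cfg() programs the CFA, because the firmware only latches the setting at that point. A minimal sketch of the same decision, assuming nothing beyond a plain offload bitmask; the bit value and structure are placeholders, not the real DPDK definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_OFFLOAD_VLAN_STRIP (1ULL << 6)       /* placeholder bit, not the DPDK value */

struct vnic_cfg {
        bool vlan_strip;
};

/* Copy the port-level offload request into the per-VNIC config before it
 * is handed to firmware; changing it after the config call is too late. */
static void vnic_apply_rx_offloads(struct vnic_cfg *vnic, uint64_t rx_offloads)
{
        vnic->vlan_strip = (rx_offloads & RX_OFFLOAD_VLAN_STRIP) != 0;
}

int main(void)
{
        struct vnic_cfg vnic;

        vnic_apply_rx_offloads(&vnic, RX_OFFLOAD_VLAN_STRIP);
        printf("vlan_strip=%d\n", vnic.vlan_strip);
        return 0;
}
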
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index db3222f4..815bad97 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -396,6 +396,8 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
req.flags = rte_cpu_to_le_32(filter->flags);
+ req.flags |=
+ rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
enables = filter->enables |
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
@@ -2744,7 +2746,7 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
HWRM_PREP(req, FUNC_VF_CFG);
req.enables = rte_cpu_to_le_32(
- HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
+ HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = rte_cpu_to_le_16(
bp->def_cp_ring->cp_ring_struct->fw_ring_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index f5ed03f1..e558413e 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -146,7 +146,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
{
struct bnxt_tx_ring_info *txr = txq->tx_ring;
struct tx_bd_long *txbd;
- struct tx_bd_long_hi *txbd1;
+ struct tx_bd_long_hi *txbd1 = NULL;
uint32_t vlan_tag_flags, cfa_action;
bool long_bd = false;
uint16_t last_prod = 0;
@@ -314,7 +314,8 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
- txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
+ if (txbd1)
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
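
bnxt_start_xmit() only fills in the second, "high" descriptor (txbd1) when the packet needs the long-BD format, so the fix initializes the pointer to NULL and guards the final fix-up. A reduced sketch of that guard, with a hypothetical descriptor type and flag:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TXBD_HI_FLAG_DONE 0x80u

struct tx_bd_hi {
        uint32_t lflags;
};

/* The 'high' descriptor only exists for long-BD packets, so the pointer is
 * initialized to NULL and checked before the final fix-up touches it. */
static void tx_finalize(struct tx_bd_hi *bd_hi)
{
        if (bd_hi != NULL)
                bd_hi->lflags |= TXBD_HI_FLAG_DONE;
}

int main(void)
{
        struct tx_bd_hi hi = { .lflags = 0 };

        tx_finalize(&hi);       /* long-BD path: descriptor gets patched */
        tx_finalize(NULL);      /* short-BD path: safely skipped */
        printf("lflags=0x%x\n", hi.lflags);
        return 0;
}
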
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 8880231e..44cf61b8 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -64,7 +64,8 @@ get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
size_t vlan_offset = 0;
- if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
+ if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto ||
+ rte_cpu_to_be_16(ETHER_TYPE_QINQ) == *proto) {
struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
vlan_offset = sizeof(struct vlan_hdr);
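
get_vlan_offset() now accepts either a plain 802.1Q tag or an 802.1ad (QinQ) outer tag before walking past the VLAN headers. The sketch below is a stand-alone version of the same walk; the ethertype constants are written out numerically instead of coming from rte_ether.h.

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <stdio.h>

#define ETHERTYPE_VLAN 0x8100   /* 802.1Q */
#define ETHERTYPE_QINQ 0x88A8   /* 802.1ad outer tag */

struct vlan_hdr {
        uint16_t vlan_tci;
        uint16_t eth_proto;     /* ethertype of the next header */
} __attribute__((packed));

/* Return the number of bytes of VLAN tagging that follow the Ethernet
 * header, updating *proto to the ethertype of the encapsulated payload.
 * 'l2_payload' points just past the 14-byte Ethernet header. */
static size_t vlan_offset(const uint8_t *l2_payload, uint16_t *proto)
{
        size_t off = 0;

        if (*proto == htons(ETHERTYPE_VLAN) || *proto == htons(ETHERTYPE_QINQ)) {
                const struct vlan_hdr *vh = (const struct vlan_hdr *)l2_payload;

                off += sizeof(*vh);
                *proto = vh->eth_proto;
                /* An outer tag may still carry an inner 802.1Q tag. */
                if (*proto == htons(ETHERTYPE_VLAN)) {
                        vh++;
                        off += sizeof(*vh);
                        *proto = vh->eth_proto;
                }
        }
        return off;
}

int main(void)
{
        /* QinQ frame payload: outer tag -> inner tag -> IPv4 (0x0800). */
        uint8_t buf[8] = { 0x00, 0x64, 0x81, 0x00, 0x00, 0xc8, 0x08, 0x00 };
        uint16_t proto = htons(ETHERTYPE_QINQ);

        printf("offset=%zu proto=0x%04x\n", vlan_offset(buf, &proto), ntohs(proto));
        return 0;
}
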
@@ -84,28 +85,34 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct bond_dev_private *internals;
- uint16_t num_rx_slave = 0;
uint16_t num_rx_total = 0;
-
+ uint16_t slave_count;
+ uint16_t active_slave;
int i;
/* Cast to structure, containing bonded device's port id and queue id */
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
internals = bd_rx_q->dev_private;
+ slave_count = internals->active_slave_count;
+ active_slave = internals->active_slave;
+ for (i = 0; i < slave_count && nb_pkts; i++) {
+ uint16_t num_rx_slave;
- for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
/* Offset of pointer to *bufs increases as packets are received
* from other slaves */
- num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
- bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
- if (num_rx_slave) {
- num_rx_total += num_rx_slave;
- nb_pkts -= num_rx_slave;
- }
+ num_rx_slave =
+ rte_eth_rx_burst(internals->active_slaves[active_slave],
+ bd_rx_q->queue_id,
+ bufs + num_rx_total, nb_pkts);
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+ if (++active_slave == slave_count)
+ active_slave = 0;
}
+ if (++internals->active_slave == slave_count)
+ internals->active_slave = 0;
return num_rx_total;
}
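
The reworked bond_ethdev_rx_burst() no longer always starts at slave 0: it resumes polling at internals->active_slave, subtracts each slave's packets from the remaining budget, and rotates the starting slave once per burst so traffic is drained evenly. A self-contained sketch of that schedule, with the receive call replaced by a stub:

#include <stdint.h>
#include <stdio.h>

#define MAX_SLAVES 4

/* Stub standing in for rte_eth_rx_burst(): pretend each slave always has
 * 2 packets ready and just report how many were "received". */
static uint16_t stub_rx_burst(uint16_t slave_id, uint16_t space)
{
        uint16_t n = space < 2 ? space : 2;

        printf("slave %u -> %u pkts\n", slave_id, n);
        return n;
}

struct bond_priv {
        uint16_t active_slave;  /* where the next burst starts polling */
        uint16_t slave_count;
};

static uint16_t bond_rx_burst(struct bond_priv *p, uint16_t nb_pkts)
{
        uint16_t total = 0, i, slave = p->active_slave;

        for (i = 0; i < p->slave_count && nb_pkts > 0; i++) {
                uint16_t n = stub_rx_burst(slave, nb_pkts);

                total += n;
                nb_pkts -= n;
                if (++slave == p->slave_count)  /* wrap around */
                        slave = 0;
        }
        /* Rotate the starting point so the same slave is not always favored. */
        if (++p->active_slave == p->slave_count)
                p->active_slave = 0;
        return total;
}

int main(void)
{
        struct bond_priv p = { .active_slave = 1, .slave_count = MAX_SLAVES };

        printf("burst 1: %u total\n", bond_rx_burst(&p, 5));
        printf("burst 2: %u total\n", bond_rx_burst(&p, 5));
        return 0;
}
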
@@ -284,25 +291,32 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
uint16_t num_rx_total = 0; /* Total number of received packets */
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slave_count;
-
- uint16_t i, idx;
+ uint16_t active_slave;
+ uint16_t i;
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
+ active_slave = internals->active_slave;
memcpy(slaves, internals->active_slaves,
sizeof(internals->active_slaves[0]) * slave_count);
- for (i = 0, idx = internals->active_slave;
- i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
- idx = idx % slave_count;
+ for (i = 0; i < slave_count && nb_pkts; i++) {
+ uint16_t num_rx_slave;
/* Read packets from this slave */
- num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
- &bufs[num_rx_total], nb_pkts - num_rx_total);
+ num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
+ bd_rx_q->queue_id,
+ bufs + num_rx_total, nb_pkts);
+ num_rx_total += num_rx_slave;
+ nb_pkts -= num_rx_slave;
+
+ if (++active_slave == slave_count)
+ active_slave = 0;
}
- internals->active_slave = idx;
+ if (++internals->active_slave == slave_count)
+ internals->active_slave = 0;
return num_rx_total;
}
@@ -480,7 +494,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
idx = 0;
}
- internals->active_slave = idx;
+ if (++internals->active_slave == slave_count)
+ internals->active_slave = 0;
+
return num_rx_total;
}
@@ -1688,12 +1704,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
/* If RSS is enabled for bonding, try to enable it for slaves */
if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
- if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
- != 0) {
+ if (internals->rss_key_len != 0) {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
- bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ internals->rss_key_len;
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
- bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+ internals->rss_key;
} else {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
}
@@ -2048,12 +2063,20 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
tlb_last_obytets[internals->active_slaves[i]] = 0;
}
- internals->link_status_polling_enabled = 0;
- for (i = 0; i < internals->slave_count; i++)
- internals->slaves[i].last_link_status = 0;
-
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->dev_started = 0;
+
+ internals->link_status_polling_enabled = 0;
+ for (i = 0; i < internals->slave_count; i++) {
+ uint16_t slave_id = internals->slaves[i].port_id;
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count, slave_id) !=
+ internals->active_slave_count) {
+ internals->slaves[i].last_link_status = 0;
+ rte_eth_dev_stop(slave_id);
+ deactivate_slave(eth_dev, slave_id);
+ }
+ }
}
void
@@ -2926,9 +2949,16 @@ bond_probe(struct rte_vdev_device *dev)
goto parse_error;
}
- if (internals->mode == BONDING_MODE_8023AD)
- rte_eth_bond_8023ad_agg_selection_set(port_id,
+ if (internals->mode == BONDING_MODE_8023AD) {
+ int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
agg_mode);
+ if (ret < 0) {
+ RTE_BOND_LOG(ERR,
+ "Invalid args for agg selection set "
+ "for bonded device %s", name);
+ return -1;
+ }
+ }
} else {
rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
}
@@ -3016,16 +3046,30 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
unsigned i, j;
- /* If RSS is enabled, fill table and key with default values */
+ /*
+ * If RSS is enabled, fill table with default values and
+ * set key to the value specified in port RSS configuration.
+ * Fall back to default RSS key if the key is not specified
+ */
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
- dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
- memcpy(internals->rss_key, default_rss_key, 40);
+ if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
+ internals->rss_key_len =
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+ memcpy(internals->rss_key,
+ dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+ internals->rss_key_len);
+ } else {
+ internals->rss_key_len = sizeof(default_rss_key);
+ memcpy(internals->rss_key, default_rss_key,
+ internals->rss_key_len);
+ }
for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
internals->reta_conf[i].mask = ~0LL;
for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
- internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+ internals->reta_conf[i].reta[j] =
+ (i * RTE_RETA_GROUP_SIZE + j) %
+ dev->data->nb_rx_queues;
}
}
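
bond_ethdev_configure() now keeps an application-supplied RSS key (falling back to the built-in default only when none is given) and fills the redirection table with (i * RTE_RETA_GROUP_SIZE + j) % nb_rx_queues instead of j % nb_rx_queues, so the queue rotation continues across groups rather than restarting at queue 0 in every group. A small worked example of that indexing, with the group size fixed at 64 as in DPDK:

#include <stdio.h>

#define RETA_GROUP_SIZE 64
#define RETA_GROUPS 2           /* enough to show the cross-group behaviour */

int main(void)
{
        unsigned int nb_rx_queues = 3;
        unsigned int i, j;

        for (i = 0; i < RETA_GROUPS; i++) {
                for (j = 0; j < RETA_GROUP_SIZE; j++) {
                        /* Old scheme restarts at queue 0 in every group;
                         * the new scheme keeps the rotation going. */
                        unsigned int old_q = j % nb_rx_queues;
                        unsigned int new_q = (i * RETA_GROUP_SIZE + j) %
                                             nb_rx_queues;

                        if (j < 3 || j == RETA_GROUP_SIZE - 1)
                                printf("group %u entry %2u: old=%u new=%u\n",
                                       i, j, old_q, new_q);
                }
        }
        return 0;
}

With 3 Rx queues, entry 0 of group 1 lands on queue 1 under the new formula (64 % 3 = 1), whereas the old one pinned it back to queue 0.
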
diff --git a/drivers/net/e1000/base/e1000_i210.c b/drivers/net/e1000/base/e1000_i210.c
index 277331c4..c2abb43f 100644
--- a/drivers/net/e1000/base/e1000_i210.c
+++ b/drivers/net/e1000/base/e1000_i210.c
@@ -941,6 +941,7 @@ STATIC s32 e1000_pll_workaround_i210(struct e1000_hw *hw)
if (ret_val != E1000_SUCCESS)
nvm_word = E1000_INVM_DEFAULT_AL;
tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+ phy_word = E1000_PHY_PLL_UNCONF;
for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
/* check current state directly from internal PHY */
e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 1d8f0794..a15ce222 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1369,12 +1369,13 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
}
/*
- * EM devices don't support drop_en functionality
+ * EM devices don't support drop_en functionality.
+ * It's an optimization that does nothing on single-queue devices,
+ * so just log the issue and carry on.
*/
if (rx_conf->rx_drop_en) {
- PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+ PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by "
"device");
- return -EINVAL;
}
/* Free memory prior to re-allocation if needed. */
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4ee12e9e..b766a5d7 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -79,6 +79,10 @@
#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IGB_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 4e526567..6b96f404 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -1573,7 +1573,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* fill mbuf attributes if any */
ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
- mbuf_head->hash.rss = (uint32_t)rx_ring->id;
+ mbuf_head->hash.rss = ena_rx_ctx.hash;
/* pass to DPDK application head mbuf */
rx_pkts[recv_idx] = mbuf_head;
diff --git a/drivers/net/enic/base/vnic_devcmd.h b/drivers/net/enic/base/vnic_devcmd.h
index 05d87b91..4d7a85d6 100644
--- a/drivers/net/enic/base/vnic_devcmd.h
+++ b/drivers/net/enic/base/vnic_devcmd.h
@@ -880,7 +880,7 @@ struct filter_action_v2 {
u32 rq_idx;
u32 flags; /* use FILTER_ACTION_XXX_FLAG defines */
u16 filter_id;
- u_int8_t reserved[32]; /* for future expansion */
+ uint8_t reserved[32]; /* for future expansion */
} __attribute__((packed));
/* Specifies the filter type. */
@@ -946,9 +946,9 @@ enum {
};
struct filter_tlv {
- u_int32_t type;
- u_int32_t length;
- u_int32_t val[0];
+ uint32_t type;
+ uint32_t length;
+ uint32_t val[0];
};
/* Data for CMD_ADD_FILTER is 2 TLV and filter + action structs */
@@ -962,10 +962,10 @@ struct filter_tlv {
* drivers should use this instead of "sizeof (struct filter_v2)" when
* computing length for TLV.
*/
-static inline u_int32_t
+static inline uint32_t
vnic_filter_size(struct filter_v2 *fp)
{
- u_int32_t size;
+ uint32_t size;
switch (fp->type) {
case FILTER_USNIC_ID:
@@ -1004,10 +1004,10 @@ enum {
* drivers should use this instead of "sizeof (struct filter_action_v2)"
* when computing length for TLV.
*/
-static inline u_int32_t
+static inline uint32_t
vnic_action_size(struct filter_action_v2 *fap)
{
- u_int32_t size;
+ uint32_t size;
switch (fap->type) {
case FILTER_ACTION_RQ_STEERING:
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index a728d077..407b36e2 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -1503,6 +1503,7 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
enic_flow_del_filter(enic, flow->enic_filter_id, error);
LIST_REMOVE(flow, next);
rte_spinlock_unlock(&enic->flows_lock);
+ rte_free(flow);
return 0;
}
@@ -1526,6 +1527,7 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
flow = LIST_FIRST(&enic->flows);
enic_flow_del_filter(enic, flow->enic_filter_id, error);
LIST_REMOVE(flow, next);
+ rte_free(flow);
}
rte_spinlock_unlock(&enic->flows_lock);
return 0;
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 831c90a1..f8dd09c3 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -246,10 +246,12 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
} else {
- if (vlan_tci != 0)
+ if (vlan_tci != 0) {
+ pkt_flags |= PKT_RX_VLAN;
mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
- else
+ } else {
mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
+ }
}
mbuf->vlan_tci = vlan_tci;
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 9a5d8733..7eb6f719 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -260,9 +260,13 @@ fs_rx_queue_release(void *queue)
return;
rxq = queue;
dev = rxq->priv->dev;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, rx_queue_release)
- (ETH(sdev)->data->rx_queues[rxq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ if (ETH(sdev)->data->rx_queues != NULL &&
+ ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ }
+ }
dev->data->rx_queues[rxq->qid] = NULL;
rte_free(rxq);
}
@@ -280,6 +284,11 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ if (rx_conf->rx_deferred_start) {
+ ERROR("Rx queue deferred start is not supported");
+ return -EINVAL;
+ }
+
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq != NULL) {
fs_rx_queue_release(rxq);
@@ -328,9 +337,13 @@ fs_tx_queue_release(void *queue)
return;
txq = queue;
dev = txq->priv->dev;
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, tx_queue_release)
- (ETH(sdev)->data->tx_queues[txq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ if (ETH(sdev)->data->tx_queues != NULL &&
+ ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ }
+ }
dev->data->tx_queues[txq->qid] = NULL;
rte_free(txq);
}
@@ -347,6 +360,11 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
uint8_t i;
int ret;
+ if (tx_conf->tx_deferred_start) {
+ ERROR("Tx queue deferred start is not supported");
+ return -EINVAL;
+ }
+
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
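
The failsafe changes are defensive: queue setup rejects rx/tx_deferred_start, which the PMD cannot honour across sub-devices, and queue release only calls into a sub-device whose queue array and entry actually exist. A minimal sketch of the guarded release, with the sub-device bookkeeping reduced to bare arrays:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define NB_SUBDEVS 2

struct subdev {
        void **rx_queues;       /* may be NULL if the sub-device never configured queues */
        unsigned int nb_rx_queues;
};

/* Release queue 'qid' only on sub-devices that really own one. */
static void fs_rx_release(struct subdev *subs, unsigned int n, unsigned int qid)
{
        unsigned int i;

        for (i = 0; i < n; i++) {
                if (subs[i].rx_queues != NULL && qid < subs[i].nb_rx_queues &&
                    subs[i].rx_queues[qid] != NULL) {
                        free(subs[i].rx_queues[qid]);
                        subs[i].rx_queues[qid] = NULL;
                        printf("released queue %u on subdev %u\n", qid, i);
                }
        }
}

int main(void)
{
        void *q0[1] = { malloc(16) };
        struct subdev subs[NB_SUBDEVS] = {
                { .rx_queues = q0, .nb_rx_queues = 1 },
                { .rx_queues = NULL, .nb_rx_queues = 0 },       /* not configured yet */
        };

        fs_rx_release(subs, NB_SUBDEVS, 0);
        return 0;
}
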
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 58dac389..a0263f6d 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -483,11 +483,6 @@ fm10k_dev_configure(struct rte_eth_dev *dev)
return 0;
}
-/* fls = find last set bit = 32 minus the number of leading zeros */
-#ifndef fls
-#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
-#endif
-
static void
fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
{
@@ -1061,8 +1056,8 @@ fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
nb_queue_pools = macvlan->nb_queue_pools;
- pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
- rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
+ pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
+ rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
/* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
@@ -1073,7 +1068,7 @@ fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
/* Flow Director configurations, only queue number is valid. */
- dglortdec = fls(dev->data->nb_rx_queues - 1);
+ dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
(hw->mac.dglort_map + GLORT_FD_Q_BASE);
FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
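
fm10k drops its private fls() macro in favour of rte_fls_u32(); both return the 1-based index of the highest set bit (32 minus the leading-zero count, 0 for an input of 0), which the DGLORT decoder uses to turn queue counts into index widths, e.g. fls(8 - 1) = 3 bits for 8 pools. A stand-alone sketch of the helper and that use:

#include <stdint.h>
#include <stdio.h>

/* find-last-set: 1-based index of the highest set bit, 0 if x == 0. */
static unsigned int fls_u32(uint32_t x)
{
        return x == 0 ? 0 : 32 - (unsigned int)__builtin_clz(x);
}

int main(void)
{
        uint32_t nb_queue_pools = 8;
        uint32_t nb_rx_queues = 32;

        /* Bits needed to index a pool, then the remaining bits carry RSS. */
        uint32_t pool_len = nb_queue_pools ? fls_u32(nb_queue_pools - 1) : 0;
        uint32_t rss_len = fls_u32(nb_rx_queues - 1) - pool_len;

        printf("pool_len=%u rss_len=%u\n", pool_len, rss_len);  /* 3 and 2 */
        return 0;
}
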
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 8cc8c5ec..4cf641b4 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -126,6 +126,7 @@ enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
@@ -433,7 +434,7 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
/* initialize base registers */
ret_code = i40e_config_asq_regs(hw);
if (ret_code != I40E_SUCCESS)
- goto init_adminq_free_rings;
+ goto init_config_regs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
@@ -441,6 +442,10 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
init_adminq_free_rings:
i40e_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ i40e_free_asq_bufs(hw);
init_adminq_exit:
return ret_code;
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index c36da2a3..ca4fa23c 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -1430,8 +1430,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
};
/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
- * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. refer to
- * DCR288
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set.
*/
struct i40e_aqc_add_rm_cloud_filt_elem_ext {
struct i40e_aqc_add_remove_cloud_filters_element_data element;
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 900d379c..21102fcb 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -1346,7 +1346,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- grst_del = grst_del * 20;
+ grst_del = min(grst_del * 20, 160U);
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c
index f03f3813..52c5f810 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -143,7 +143,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
txq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -166,7 +166,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
rxq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -189,7 +189,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_cntx_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -212,7 +212,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_filt_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -233,7 +233,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
(sizeof(struct i40e_hmc_sd_entry) *
hw->hmc.sd_table.sd_cnt));
if (ret_code)
- goto init_lan_hmc_out;
+ goto free_hmc_out;
hw->hmc.sd_table.sd_entry =
(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
}
@@ -242,6 +242,11 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
init_lan_hmc_out:
return ret_code;
+free_hmc_out:
+ if (hw->hmc.hmc_obj_virt_mem.va)
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+
+ return ret_code;
}
/**
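
The i40e_lan_hmc.c and i40e_adminq.c hunks reroute error branches that fire after a buffer has been allocated to new cleanup labels (free_hmc_out, init_config_regs) so the memory is released instead of leaked. The sketch below shows the same goto-based cleanup shape in isolation, with a hypothetical two-stage init:

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical two-stage init: allocate a table, then validate a count.
 * Any failure after the allocation must release the table before returning. */
static int init_object(size_t table_bytes, unsigned int count, unsigned int max)
{
        int ret = 0;
        void *table = malloc(table_bytes);

        if (table == NULL)
                return -1;              /* nothing allocated yet: plain return */

        if (count > max) {
                fprintf(stderr, "count %u exceeds max %u\n", count, max);
                ret = -1;
                goto free_out;          /* error after allocation: free first */
        }

        /* ... further setup would go here ... */
        free(table);                    /* freed on success too, only to keep
                                         * this stand-alone example leak-free */
        return 0;

free_out:
        free(table);
        return ret;
}

int main(void)
{
        printf("ok=%d fail=%d\n",
               init_object(64, 4, 16), init_object(64, 32, 16));
        return 0;
}
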
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 711c6e7b..500673a9 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1224,9 +1224,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Make sure all is clean before doing PF reset */
i40e_clear_hw(hw);
- /* Initialize the hardware */
- i40e_hw_init(dev);
-
/* Reset here to make sure all is clean for each PF */
ret = i40e_pf_reset(hw);
if (ret) {
@@ -1241,6 +1238,23 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
return ret;
}
+ /* Initialize the parameters for adminq */
+ i40e_init_adminq_parameter(hw);
+ ret = i40e_init_adminq(hw);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
+ return -EIO;
+ }
+ PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack);
+
+ /* Initialize the hardware */
+ i40e_hw_init(dev);
+
i40e_config_automask(pf);
i40e_set_default_pctype_table(dev);
@@ -1257,20 +1271,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
- /* Initialize the parameters for adminq */
- i40e_init_adminq_parameter(hw);
- ret = i40e_init_adminq(hw);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
- return -EIO;
- }
- PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
- hw->aq.api_maj_ver, hw->aq.api_min_ver,
- ((hw->nvm.version >> 12) & 0xf),
- ((hw->nvm.version >> 4) & 0xff),
- (hw->nvm.version & 0xf), hw->nvm.eetrack);
-
/* initialise the L3_MAP register */
if (!pf->support_multi_driver) {
ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
@@ -2419,6 +2419,10 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev)
if (status != I40E_SUCCESS)
PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous");
+ /* must remain in all_multicast mode */
+ if (dev->data->all_multicast == 1)
+ return;
+
status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
false, NULL);
if (status != I40E_SUCCESS)
@@ -2480,7 +2484,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
}
static __rte_always_inline void
-update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
+update_link_reg(struct i40e_hw *hw, struct rte_eth_link *link)
{
/* Link status registers and values*/
#define I40E_PRTMAC_LINKSTA 0x001E2420
@@ -2534,8 +2538,8 @@ update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link)
}
static __rte_always_inline void
-update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
- bool enable_lse)
+update_link_aq(struct i40e_hw *hw, struct rte_eth_link *link,
+ bool enable_lse, int wait_to_complete)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */
@@ -2557,7 +2561,7 @@ update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link,
}
link->link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (unlikely(link->link_status != 0))
+ if (!wait_to_complete || link->link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
@@ -2607,10 +2611,10 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
- if (!wait_to_complete)
- update_link_no_wait(hw, &link);
+ if (!wait_to_complete && !enable_lse)
+ update_link_reg(hw, &link);
else
- update_link_wait(hw, &link, enable_lse);
+ update_link_aq(hw, &link, enable_lse, wait_to_complete);
rte_i40e_dev_atomic_write_link_status(dev, &link);
if (link.link_status == old.link_status)
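
After the rename, i40e_dev_link_update() reads the link registers directly (update_link_reg) only for a no-wait query with link-state events disabled; every other case goes through the admin queue (update_link_aq), and only a wait_to_complete request keeps retrying there, every 100 ms for up to 1 s. A reduced sketch of that retry policy with the admin command stubbed out:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define CHECK_INTERVAL_MS 100
#define MAX_REPEAT 10           /* 1 s total when waiting for completion */

/* Stub for the get-link-info admin command: link comes up on the 3rd query. */
static bool aq_get_link_up(int attempt)
{
        return attempt >= 2;
}

static bool update_link_aq(int wait_to_complete)
{
        bool up = false;
        int i;

        for (i = 0; i < MAX_REPEAT; i++) {
                up = aq_get_link_up(i);
                /* Without wait_to_complete, report whatever the first query says. */
                if (!wait_to_complete || up)
                        break;
                usleep(CHECK_INTERVAL_MS * 1000);
        }
        return up;
}

int main(void)
{
        printf("no-wait: %d\n", update_link_aq(0));     /* single query */
        printf("wait:    %d\n", update_link_aq(1));     /* polls until up */
        return 0;
}
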
@@ -5075,7 +5079,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
int ret;
/* Use the FW API if FW >= v5.0 */
- if (hw->aq.fw_maj_ver < 5) {
+ if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
return;
}
@@ -5346,7 +5350,7 @@ i40e_vsi_setup(struct i40e_pf *pf,
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
/* Use the VEB configuration if FW >= v5.0 */
- if (hw->aq.fw_maj_ver >= 5) {
+ if (hw->aq.fw_maj_ver >= 5 || hw->mac.type == I40E_MAC_X722) {
/* Configure switch ID */
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
@@ -11201,6 +11205,32 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
return 0;
}
+/**
+ * This function is used to check if the register is valid.
+ * Below is the valid registers list for X722 only:
+ * 0x2b800--0x2bb00
+ * 0x38700--0x38a00
+ * 0x3d800--0x3db00
+ * 0x208e00--0x209000
+ * 0x20be00--0x20c000
+ * 0x263c00--0x264000
+ * 0x265c00--0x266000
+ */
+static inline int i40e_valid_regs(enum i40e_mac_type type, uint32_t reg_offset)
+{
+ if ((type != I40E_MAC_X722) &&
+ ((reg_offset >= 0x2b800 && reg_offset <= 0x2bb00) ||
+ (reg_offset >= 0x38700 && reg_offset <= 0x38a00) ||
+ (reg_offset >= 0x3d800 && reg_offset <= 0x3db00) ||
+ (reg_offset >= 0x208e00 && reg_offset <= 0x209000) ||
+ (reg_offset >= 0x20be00 && reg_offset <= 0x20c000) ||
+ (reg_offset >= 0x263c00 && reg_offset <= 0x264000) ||
+ (reg_offset >= 0x265c00 && reg_offset <= 0x266000)))
+ return 0;
+ else
+ return 1;
+}
+
static int i40e_get_regs(struct rte_eth_dev *dev,
struct rte_dev_reg_info *regs)
{
@@ -11242,8 +11272,11 @@ static int i40e_get_regs(struct rte_eth_dev *dev,
reg_offset = arr_idx * reg_info->stride1 +
arr_idx2 * reg_info->stride2;
reg_offset += reg_info->base_addr;
- ptr_data[reg_offset >> 2] =
- I40E_READ_REG(hw, reg_offset);
+ if (!i40e_valid_regs(hw->mac.type, reg_offset))
+ ptr_data[reg_offset >> 2] = 0;
+ else
+ ptr_data[reg_offset >> 2] =
+ I40E_READ_REG(hw, reg_offset);
}
}
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 078b405a..fb35d122 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -87,6 +87,10 @@
PKT_TX_OUTER_IP_CKSUM)
#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IPV6 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
@@ -97,7 +101,7 @@
I40E_TX_IEEE1588_TMST)
#define I40E_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+ ~(PKT_TX_OFFLOAD_MASK & I40E_TX_OFFLOAD_MASK)
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
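
The I40E_TX_OFFLOAD_NOTSUP_MASK rewrite does two things together: the supported mask gains the PKT_TX_IPV4/IPV6 and outer-IP bits so those informational flags are no longer rejected, and the not-supported mask becomes the complement of the intersection of the generic and driver masks instead of their XOR, so a flag bit defined in neither mask is still treated as unsupported rather than silently passing the check. A tiny numeric illustration of that last distinction, with made-up bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up flag bits, kept small on purpose:
         * bits 0-3: generic TX offload flags known to the framework,
         * bits 0-1: flags this driver supports,
         * bit  6  : a hypothetical future flag in neither mask yet. */
        const uint64_t GENERIC = 0x0F;
        const uint64_t DRIVER  = 0x03;
        const uint64_t FUTURE  = 1ULL << 6;

        uint64_t notsup_xor = GENERIC ^ DRIVER;         /* 0x0C */
        uint64_t notsup_and = ~(GENERIC & DRIVER);      /* everything outside
                                                         * the intersection */

        uint64_t ok_flags  = 0x03;              /* only supported flags */
        uint64_t bad_flags = 0x01 | FUTURE;     /* carries the unknown flag */

        printf("ok : xor=%d and=%d\n",
               (ok_flags & notsup_xor) != 0, (ok_flags & notsup_and) != 0);
        printf("bad: xor=%d and=%d\n",
               (bad_flags & notsup_xor) != 0, (bad_flags & notsup_and) != 0);
        return 0;
}

Only the complement-of-intersection form flags the packet carrying the unknown bit; the XOR form lets it through.
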
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
index 2d25873d..5771645c 100644
--- a/drivers/net/i40e/rte_pmd_i40e.c
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -367,7 +367,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
hw = I40E_VSI_TO_HW(vsi);
/* Use the FW API if FW >= v5.0 */
- if (hw->aq.fw_maj_ver < 5) {
+ if (hw->aq.fw_maj_ver < 5 && hw->mac.type != I40E_MAC_X722) {
PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
return -ENOTSUP;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index 5e6ad952..4076dd56 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -5147,10 +5147,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_flap_tx_laser(hw);
/* Wait for the controller to acquire link. Per IEEE 802.3ap,
- * Section 73.10.2, we may have to wait up to 500ms if KR is
+ * Section 73.10.2, we may have to wait up to 1000ms if KR is
* attempted. 82599 uses the same timing for 10g SFI.
*/
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < 10; i++) {
/* Wait for the link partner to also set speed */
msec_delay(100);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d7eb4580..f047db83 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -254,6 +254,8 @@ static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
struct rte_intr_handle *handle);
static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static void ixgbe_dev_setup_link_alarm_handler(void *param);
+
static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
@@ -2531,6 +2533,9 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
@@ -2732,6 +2737,12 @@ skip_link_setup:
"please call hierarchy_commit() "
"before starting the port");
+ /*
+ * Update link status right before return, because it may
+ * start link configuration process in a separate thread.
+ */
+ ixgbe_dev_link_update(dev, 0);
+
return 0;
error:
@@ -2759,6 +2770,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
/* disable interrupts */
ixgbe_disable_intr(hw);
@@ -3865,11 +3878,6 @@ static int
ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
int *link_up, int wait_to_complete)
{
- /**
- * for a quick link status checking, wait_to_compelet == 0,
- * skip PF link status checking
- */
- bool no_pflink_check = wait_to_complete == 0;
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
uint32_t links_reg, in_msg;
@@ -3930,14 +3938,6 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
*speed = IXGBE_LINK_SPEED_UNKNOWN;
}
- if (no_pflink_check) {
- if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
- mac->get_link_status = true;
- else
- mac->get_link_status = false;
-
- goto out;
- }
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
@@ -3947,7 +3947,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
/* msg is not CTS and is NACK we must have lost CTS status */
if (in_msg & IXGBE_VT_MSGTYPE_NACK)
- ret_val = -1;
+ mac->get_link_status = false;
goto out;
}
@@ -3967,6 +3967,25 @@ out:
return ret_val;
}
+static void
+ixgbe_dev_setup_link_alarm_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ u32 speed;
+ bool autoneg = false;
+
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+
+ ixgbe_setup_link(hw, speed, true);
+
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
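The handler above is driven by the EAL alarm API: link setup is deferred off the caller's context, and dev_start/dev_stop cancel any pending callback before touching the hardware. A minimal sketch of that deferral pattern (function names here are hypothetical, not the driver's):

    #include <rte_alarm.h>
    #include <rte_ethdev.h>

    static void link_setup_cb(void *param)
    {
        struct rte_eth_dev *dev = param;

        /* Slow PHY/link configuration would run here, outside the caller. */
        (void)dev;
    }

    static void schedule_link_setup(struct rte_eth_dev *dev)
    {
        /* Fire once, 10 us from now, in the EAL interrupt thread. */
        rte_eal_alarm_set(10, link_setup_cb, dev);
    }

    static void cancel_link_setup(struct rte_eth_dev *dev)
    {
        /* Must be cancelled before stop/reset so the callback cannot race. */
        rte_eal_alarm_cancel(link_setup_cb, dev);
    }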
/* return 0 means link status changed, -1 means not changed */
static int
ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
@@ -3979,9 +3998,7 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int link_up;
int diag;
- u32 speed = 0;
int wait = 1;
- bool autoneg = false;
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
@@ -3992,12 +4009,11 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
hw->mac.get_link_status = true;
- if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
- ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
- speed = hw->phy.autoneg_advertised;
- if (!speed)
- ixgbe_get_link_capabilities(hw, &speed, &autoneg);
- ixgbe_setup_link(hw, speed, true);
+ if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) {
+ rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ if (link.link_status == old.link_status)
+ return -1;
+ return 0;
}
/* check if it needs to wait to complete, if lsc interrupt is enabled */
@@ -4020,12 +4036,16 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
if (link_up == 0) {
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ rte_eal_alarm_set(10,
+ ixgbe_dev_setup_link_alarm_handler, dev);
+ }
if (link.link_status == old.link_status)
return -1;
return 0;
}
- intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -5031,6 +5051,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ /* Stop the link setup handler before resetting the HW. */
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
@@ -5103,6 +5126,12 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
/* Re-enable interrupt for VF */
ixgbevf_intr_enable(hw);
+ /*
+ * Update link status right before return, because it may
+ * start link configuration process in a separate thread.
+ */
+ ixgbevf_dev_link_update(dev, 0);
+
return 0;
}
@@ -5115,6 +5144,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
ixgbevf_intr_disable(hw);
hw->adapter_stopped = 1;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index 9bc84624..651b5e8d 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -87,6 +87,10 @@
#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 89f552c8..170e2cb1 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -386,6 +386,8 @@ mlx4_rxmode_toggle(struct rte_eth_dev *dev, enum rxmode_toggle toggle)
mode = "all multicast";
dev->data->all_multicast = toggle & 1;
break;
+ default:
+ mode = "undefined";
}
if (!mlx4_flow_sync(priv, &error))
return;
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 36f3a056..e117ec84 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -78,6 +78,12 @@
*/
#define MLX5_TXQS_MIN_INLINE "txqs_min_inline"
+/*
+ * Device parameter to configure the number of TX queues threshold for
+ * enabling vectorized Tx.
+ */
+#define MLX5_TXQS_MAX_VEC "txqs_max_vec"
+
/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
@@ -112,6 +118,7 @@ struct mlx5_args {
int cqe_comp;
int txq_inline;
int txqs_inline;
+ int txqs_vec;
int mps;
int mpw_hdr_dseg;
int inline_max_packet_sz;
@@ -236,6 +243,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
priv->txqs_n = 0;
priv->txqs = NULL;
}
+ mlx5_mr_deregister_memseg(dev);
if (priv->pd != NULL) {
assert(priv->ctx != NULL);
claim_zero(ibv_dealloc_pd(priv->pd));
@@ -276,10 +284,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
if (ret)
DRV_LOG(WARNING, "port %u some flows still remain",
dev->data->port_id);
- ret = mlx5_mr_verify(dev);
- if (ret)
- DRV_LOG(WARNING, "port %u some memory region still remain",
- dev->data->port_id);
memset(priv, 0, sizeof(*priv));
}
@@ -442,6 +446,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
args->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
args->txqs_inline = tmp;
+ } else if (strcmp(MLX5_TXQS_MAX_VEC, key) == 0) {
+ args->txqs_vec = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
args->mps = !!tmp;
} else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
@@ -480,6 +486,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
MLX5_RXQ_CQE_COMP_EN,
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
+ MLX5_TXQS_MAX_VEC,
MLX5_TXQ_MPW_EN,
MLX5_TXQ_MPW_HDR_DSEG_EN,
MLX5_TXQ_MAX_INLINE_LEN,
@@ -640,8 +647,17 @@ mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
priv->txq_inline = args->txq_inline;
if (args->txqs_inline != MLX5_ARG_UNSET)
priv->txqs_inline = args->txqs_inline;
- if (args->mps != MLX5_ARG_UNSET)
+ if (args->txqs_vec != MLX5_ARG_UNSET)
+ priv->txqs_vec = args->txqs_vec;
+ if (args->mps != MLX5_ARG_UNSET) {
priv->mps = args->mps ? priv->mps : 0;
+ } else if (priv->mps == MLX5_MPW) {
+ /*
+ * MPW is disabled by default, while the Enhanced MPW is enabled
+ * by default.
+ */
+ priv->mps = MLX5_MPW_DISABLED;
+ }
if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
@@ -680,6 +696,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
unsigned int mps;
unsigned int cqe_comp;
unsigned int tunnel_en = 0;
+ unsigned int txqs_vec = MLX5_VPMD_MAX_TXQS;
int idx;
int i;
struct mlx5dv_context attrs_out;
@@ -726,8 +743,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
continue;
switch (pci_dev->id.device_id) {
case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
- tunnel_en = 1;
- break;
case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
@@ -735,6 +750,10 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
tunnel_en = 1;
break;
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5BF:
+ txqs_vec = MLX5_VPMD_MAX_TXQS_BLUEFIELD;
+ tunnel_en = 1;
+ break;
default:
break;
}
@@ -805,6 +824,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
.cqe_comp = MLX5_ARG_UNSET,
.txq_inline = MLX5_ARG_UNSET,
.txqs_inline = MLX5_ARG_UNSET,
+ .txqs_vec = MLX5_ARG_UNSET,
.mps = MLX5_ARG_UNSET,
.mpw_hdr_dseg = MLX5_ARG_UNSET,
.inline_max_packet_sz = MLX5_ARG_UNSET,
@@ -908,6 +928,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
/* Enable vector by default if supported. */
priv->tx_vec_en = 1;
priv->rx_vec_en = 1;
+ priv->txqs_vec = txqs_vec;
err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
DRV_LOG(ERR, "failed to process device arguments: %s",
@@ -1154,6 +1175,10 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF)
+ },
+ {
.vendor_id = 0
}
};
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5e6027b8..08b667f9 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -77,6 +77,7 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5BF = 0xa2d2,
};
struct mlx5_xstats_ctrl {
@@ -138,6 +139,7 @@ struct priv {
unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
unsigned int txqs_inline; /* Queue number threshold for inlining. */
+ unsigned int txqs_vec; /* Queue number threshold for vectorized Tx. */
unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
@@ -152,7 +154,9 @@ struct priv {
struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
struct mlx5_flows flows; /* RTE Flow rules. */
struct mlx5_flows ctrl_flows; /* Control flow rules. */
- LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
+ struct mlx5_mr (*mr)[]; /* Static MR table. */
+ struct mlx5_mr_cache (*mr_cache)[]; /* Global MR cache table. */
+ unsigned int mr_n; /* Size of static MR table. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
LIST_HEAD(hrxq, mlx5_hrxq) hrxqs; /* Verbs Hash Rx queues. */
@@ -301,16 +305,14 @@ void mlx5_flow_delete_drop_queue(struct rte_eth_dev *dev);
/* mlx5_socket.c */
-int mlx5_socket_init(struct rte_eth_dev *priv);
-void mlx5_socket_uninit(struct rte_eth_dev *priv);
-void mlx5_socket_handle(struct rte_eth_dev *priv);
-int mlx5_socket_connect(struct rte_eth_dev *priv);
+int mlx5_socket_init(struct rte_eth_dev *dev);
+void mlx5_socket_uninit(struct rte_eth_dev *dev);
+void mlx5_socket_handle(struct rte_eth_dev *dev);
+int mlx5_socket_connect(struct rte_eth_dev *dev);
/* mlx5_mr.c */
-struct mlx5_mr *mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp);
-struct mlx5_mr *mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp);
-int mlx5_mr_release(struct mlx5_mr *mr);
-int mlx5_mr_verify(struct rte_eth_dev *dev);
+int mlx5_mr_register_memseg(struct rte_eth_dev *dev);
+void mlx5_mr_deregister_memseg(struct rte_eth_dev *dev);
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index 9c64bb33..1de3bdc4 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -88,8 +88,13 @@
/* Maximum Packet headers size (L2+L3+L4) for TSO. */
#define MLX5_MAX_TSO_HEADER 128
-/* Default minimum number of Tx queues for vectorized Tx. */
-#define MLX5_VPMD_MIN_TXQS 4
+/* Default maximum number of Tx queues for vectorized Tx. */
+#if defined(RTE_ARCH_ARM64)
+#define MLX5_VPMD_MAX_TXQS 8
+#else
+#define MLX5_VPMD_MAX_TXQS 4
+#endif
+#define MLX5_VPMD_MAX_TXQS_BLUEFIELD 16
/* Threshold of buffer replenishment for vectorized Rx. */
#define MLX5_VPMD_RXQ_RPLNSH_THRESH(n) \
@@ -124,6 +129,12 @@
*/
#define MLX5_UAR_OFFSET (1ULL << 32)
+/* Size of per-queue MR cache table. */
+#define MLX5_MR_CACHE_N 8
+
+/* First entry must be NULL for comparison. */
+#define MLX5_MR_LOOKUP_TABLE_PAD 1
+
/* Definition of static_assert found in /usr/include/assert.h */
#ifndef HAVE_STATIC_ASSERT
#define static_assert _Static_assert
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index e441483a..198c30b3 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -408,6 +408,11 @@ mlx5_dev_configure(struct rte_eth_dev *dev)
ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
if (ret)
return ret;
+ if (mlx5_mr_register_memseg(dev)) {
+ DRV_LOG(ERR, "%p: MR registration failed", (void *)dev);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
/* When the number of RX queues is not a power of two, the remaining
* table entries are padded with reused WQs and hashes are not spread
* uniformly. */
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index a50c5208..c3410a62 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -47,355 +47,398 @@
#include "mlx5.h"
#include "mlx5_rxtx.h"
-struct mlx5_check_mempool_data {
- int ret;
- char *start;
- char *end;
+struct mr_update_mempool_data {
+ struct rte_eth_dev *dev;
+ struct mlx5_mr_cache *lkp_tbl;
+ uint16_t tbl_sz;
};
-/* Called by mlx5_check_mempool() when iterating the memory chunks. */
-static void
-mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
- void *opaque, struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx __rte_unused)
+/**
+ * Look up LKEY from given lookup table by Binary Search, store the last index
+ * and return searched LKEY.
+ *
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param n
+ * Size of lookup table.
+ * @param[out] idx
+ * Pointer to index. Even on search failure, returns index where it stops
+ * searching so that index can be used when inserting a new entry.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * Searched LKEY on success, UINT32_MAX on no match.
+ */
+static uint32_t
+mlx5_mr_lookup(struct mlx5_mr_cache *lkp_tbl, uint16_t n, uint16_t *idx,
+ uintptr_t addr)
{
- struct mlx5_check_mempool_data *data = opaque;
+ uint16_t base = 0;
- /* It already failed, skip the next chunks. */
- if (data->ret != 0)
- return;
- /* It is the first chunk. */
- if (data->start == NULL && data->end == NULL) {
- data->start = memhdr->addr;
- data->end = data->start + memhdr->len;
- return;
- }
- if (data->end == memhdr->addr) {
- data->end += memhdr->len;
- return;
- }
- if (data->start == (char *)memhdr->addr + memhdr->len) {
- data->start -= memhdr->len;
- return;
- }
- /* Error, mempool is not virtually contiguous. */
- data->ret = -1;
+ /* First entry must be NULL for comparison. */
+ assert(n == 0 || (lkp_tbl[0].start == 0 &&
+ lkp_tbl[0].lkey == UINT32_MAX));
+ /* Binary search. */
+ do {
+ register uint16_t delta = n >> 1;
+
+ if (addr < lkp_tbl[base + delta].start) {
+ n = delta;
+ } else {
+ base += delta;
+ n -= delta;
+ }
+ } while (n > 1);
+ assert(addr >= lkp_tbl[base].start);
+ *idx = base;
+ if (addr < lkp_tbl[base].end)
+ return lkp_tbl[base].lkey;
+ /* Not found. */
+ return UINT32_MAX;
}
/**
- * Check if a mempool can be used: it must be virtually contiguous.
+ * Insert an entry to LKEY lookup table.
*
- * @param[in] mp
- * Pointer to memory pool.
- * @param[out] start
- * Pointer to the start address of the mempool virtual memory area
- * @param[out] end
- * Pointer to the end address of the mempool virtual memory area
+ * @param lkp_tbl
+ * Pointer to lookup table. The size of array must be enough to add one more
+ * entry.
+ * @param n
+ * Size of lookup table.
+ * @param entry
+ * Pointer to new entry to insert.
*
* @return
- * 0 on success (mempool is virtually contiguous), -1 on error.
+ * Size of returning lookup table.
*/
static int
-mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start,
- uintptr_t *end)
+mlx5_mr_insert(struct mlx5_mr_cache *lkp_tbl, uint16_t n,
+ struct mlx5_mr_cache *entry)
{
- struct mlx5_check_mempool_data data;
+ uint16_t idx = 0;
+ size_t shift;
- memset(&data, 0, sizeof(data));
- rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data);
- *start = (uintptr_t)data.start;
- *end = (uintptr_t)data.end;
- return data.ret;
+ /* Check if entry exist. */
+ if (mlx5_mr_lookup(lkp_tbl, n, &idx, entry->start) != UINT32_MAX)
+ return n;
+ /* Insert entry. */
+ ++idx;
+ shift = (n - idx) * sizeof(struct mlx5_mr_cache);
+ if (shift)
+ memmove(&lkp_tbl[idx + 1], &lkp_tbl[idx], shift);
+ lkp_tbl[idx] = *entry;
+ DRV_LOG(DEBUG, "%p: inserted lkp_tbl[%u], start = 0x%lx, end = 0x%lx",
+ (void *)lkp_tbl, idx, lkp_tbl[idx].start, lkp_tbl[idx].end);
+ return n + 1;
}
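To make the lookup/insert invariants concrete, here is a small illustration of the table layout both functions assume (addresses and lkeys are made up):

    /*
     * Hypothetical cache after registering two memsegs; index 0 is the pad
     * entry reserved by MLX5_MR_LOOKUP_TABLE_PAD so the binary search always
     * has a lower bound to compare against.
     *
     *   idx  start      end        lkey
     *   [0]  0x0        0x0        UINT32_MAX   <- sentinel, never matches
     *   [1]  0x100000   0x200000   0x11
     *   [2]  0x400000   0x600000   0x22
     *
     * mlx5_mr_lookup(tbl, 3, &idx, 0x150000) -> 0x11       (idx = 1)
     * mlx5_mr_lookup(tbl, 3, &idx, 0x300000) -> UINT32_MAX (idx = 1, gap)
     * mlx5_mr_lookup(tbl, 3, &idx, 0x500000) -> 0x22       (idx = 2)
     */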
/**
- * Register a Memory Region (MR) <-> Memory Pool (MP) association in
- * txq->mp2mr[]. If mp2mr[] is full, remove an entry first.
+ * Incrementally update LKEY lookup table for a specific address from registered
+ * Memory Regions.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
- * @param idx
- * Index of the next available entry.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param lkp_tbl
+ * Pointer to lookup table to fill. The size of array must be at least
+ * (priv->mr_n + 1).
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
*
* @return
- * mr on success, NULL on failure and rte_errno is set.
+ * Size of the updated lookup table.
*/
-struct mlx5_mr *
-mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq, struct rte_mempool *mp,
- unsigned int idx)
+static int
+mlx5_mr_update_addr(struct rte_eth_dev *dev, struct mlx5_mr_cache *lkp_tbl,
+ uint16_t n, uintptr_t addr)
{
- struct mlx5_txq_ctrl *txq_ctrl =
- container_of(txq, struct mlx5_txq_ctrl, txq);
- struct rte_eth_dev *dev;
- struct mlx5_mr *mr;
+ struct priv *priv = dev->data->dev_private;
+ uint16_t idx;
+ uint32_t ret __rte_unused;
- rte_spinlock_lock(&txq_ctrl->priv->mr_lock);
- /* Add a new entry, register MR first. */
- DRV_LOG(DEBUG, "port %u discovered new memory pool \"%s\" (%p)",
- PORT_ID(txq_ctrl->priv), mp->name, (void *)mp);
- dev = ETH_DEV(txq_ctrl->priv);
- mr = mlx5_mr_get(dev, mp);
- if (mr == NULL) {
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DRV_LOG(DEBUG,
- "port %u using unregistered mempool 0x%p(%s)"
- " in secondary process, please create mempool"
- " before rte_eth_dev_start()",
- PORT_ID(txq_ctrl->priv), (void *)mp, mp->name);
- rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- rte_errno = ENOTSUP;
- return NULL;
- }
- mr = mlx5_mr_new(dev, mp);
- }
- if (unlikely(mr == NULL)) {
- DRV_LOG(DEBUG,
- "port %u unable to configure memory region,"
- " ibv_reg_mr() failed.",
- PORT_ID(txq_ctrl->priv));
- rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- return NULL;
+ if (n == 0) {
+ /* First entry must be NULL for comparison. */
+ lkp_tbl[n++] = (struct mlx5_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
}
- if (unlikely(idx == RTE_DIM(txq->mp2mr))) {
- /* Table is full, remove oldest entry. */
- DRV_LOG(DEBUG,
- "port %u memory region <-> memory pool table full, "
- " dropping oldest entry",
- PORT_ID(txq_ctrl->priv));
- --idx;
- mlx5_mr_release(txq->mp2mr[0]);
- memmove(&txq->mp2mr[0], &txq->mp2mr[1],
- (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0])));
- }
- /* Store the new entry. */
- txq_ctrl->txq.mp2mr[idx] = mr;
- DRV_LOG(DEBUG,
- "port %u new memory region lkey for MP \"%s\" (%p): 0x%08"
- PRIu32,
- PORT_ID(txq_ctrl->priv), mp->name, (void *)mp,
- txq_ctrl->txq.mp2mr[idx]->lkey);
- rte_spinlock_unlock(&txq_ctrl->priv->mr_lock);
- return mr;
+ ret = mlx5_mr_lookup(*priv->mr_cache, MR_TABLE_SZ(priv->mr_n),
+ &idx, addr);
+ /* Lookup must succeed, the global cache is all-inclusive. */
+ assert(ret != UINT32_MAX);
+ DRV_LOG(DEBUG, "port %u adding LKEY (0x%x) for addr 0x%lx",
+ dev->data->port_id, (*priv->mr_cache)[idx].lkey, addr);
+ return mlx5_mr_insert(lkp_tbl, n, &(*priv->mr_cache)[idx]);
}
-struct mlx5_mp2mr_mbuf_check_data {
- int ret;
-};
-
/**
- * Callback function for rte_mempool_obj_iter() to check whether a given
- * mempool object looks like a mbuf.
+ * Bottom-half of LKEY search on datapath. Firstly search in cache_bh[] and if
+ * misses, search in the global MR cache table and update the new entry to
+ * per-queue local caches.
*
- * @param[in] mp
- * The mempool pointer
- * @param[in] arg
- * Context data (struct txq_mp2mr_mbuf_check_data). Contains the
- * return value.
- * @param[in] obj
- * Object address.
- * @param index
- * Object index, unused.
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mr_ctrl
+ * Pointer to per-queue MR control structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * LKEY on success.
*/
-static void
-txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj,
- uint32_t index __rte_unused)
+static inline uint32_t
+mlx5_mr_mb2mr_bh(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ uintptr_t addr)
{
- struct mlx5_mp2mr_mbuf_check_data *data = arg;
- struct rte_mbuf *buf = obj;
+ uint32_t lkey;
+ uint16_t bh_idx = 0;
+ struct mlx5_mr_cache *mr_cache = &mr_ctrl->cache[mr_ctrl->head];
- /*
- * Check whether mbuf structure fits element size and whether mempool
- * pointer is valid.
- */
- if (sizeof(*buf) > mp->elt_size || buf->pool != mp)
- data->ret = -1;
+ /* Binary-search MR translation table. */
+ lkey = mlx5_mr_lookup(*mr_ctrl->cache_bh, mr_ctrl->bh_n, &bh_idx, addr);
+ if (likely(lkey != UINT32_MAX)) {
+ /* Update cache. */
+ *mr_cache = (*mr_ctrl->cache_bh)[bh_idx];
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
+ }
+ /* Missed in the per-queue lookup table. Search in the global cache. */
+ mr_ctrl->bh_n = mlx5_mr_update_addr(dev, *mr_ctrl->cache_bh,
+ mr_ctrl->bh_n, addr);
+ /* Search again with updated entries. */
+ lkey = mlx5_mr_lookup(*mr_ctrl->cache_bh, mr_ctrl->bh_n, &bh_idx, addr);
+ /* Must always succeed. */
+ assert(lkey != UINT32_MAX);
+ /* Update cache. */
+ *mr_cache = (*mr_ctrl->cache_bh)[bh_idx];
+ mr_ctrl->mru = mr_ctrl->head;
+ /* Point to the next victim, the oldest. */
+ mr_ctrl->head = (mr_ctrl->head + 1) % MLX5_MR_CACHE_N;
+ return lkey;
}
/**
- * Iterator function for rte_mempool_walk() to register existing mempools and
- * fill the MP to MR cache of a TX queue.
+ * Bottom-half of mlx5_rx_mb2mr() if search on mr_cache_bh[] fails.
*
- * @param[in] mp
- * Memory Pool to register.
- * @param *arg
- * Pointer to TX queue structure.
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Search key.
+ *
+ * @return
+ * LKEY on success.
*/
-void
-mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg)
+uint32_t
+mlx5_rx_mb2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr)
{
- struct priv *priv = (struct priv *)arg;
- struct mlx5_mp2mr_mbuf_check_data data = {
- .ret = 0,
- };
- struct mlx5_mr *mr;
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- /* Register mempool only if the first element looks like a mbuf. */
- if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 ||
- data.ret == -1)
- return;
- mr = mlx5_mr_get(ETH_DEV(priv), mp);
- if (mr) {
- mlx5_mr_release(mr);
- return;
- }
- mr = mlx5_mr_new(ETH_DEV(priv), mp);
- if (!mr)
- DRV_LOG(ERR, "port %u cannot create memory region: %s",
- PORT_ID(priv), strerror(rte_errno));
+ DRV_LOG(DEBUG,
+ "port %u not found in rxq->mr_cache[], last-hit=%u, head=%u",
+ PORT_ID(rxq_ctrl->priv), rxq->mr_ctrl.mru, rxq->mr_ctrl.head);
+ return mlx5_mr_mb2mr_bh(ETH_DEV(rxq_ctrl->priv), &rxq->mr_ctrl, addr);
}
/**
- * Register a new memory region from the mempool and store it in the memory
- * region list.
+ * Bottom-half of mlx5_tx_mb2mr() if search on cache_bh[] fails.
*
- * @param dev
- * Pointer to Ethernet device.
- * @param mp
- * Pointer to the memory pool to register.
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Search key.
*
* @return
- * The memory region on success, NULL on failure and rte_errno is set.
+ * LKEY on success.
*/
-struct mlx5_mr *
-mlx5_mr_new(struct rte_eth_dev *dev, struct rte_mempool *mp)
+uint32_t
+mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
- const struct rte_memseg *ms = rte_eal_get_physmem_layout();
- uintptr_t start;
- uintptr_t end;
- unsigned int i;
- struct mlx5_mr *mr;
-
- mr = rte_zmalloc_socket(__func__, sizeof(*mr), 0, mp->socket_id);
- if (!mr) {
- DRV_LOG(DEBUG,
- "port %u unable to configure memory region,"
- " ibv_reg_mr() failed.",
- dev->data->port_id);
- rte_errno = ENOMEM;
- return NULL;
- }
- if (mlx5_check_mempool(mp, &start, &end) != 0) {
- DRV_LOG(ERR, "port %u mempool %p: not virtually contiguous",
- dev->data->port_id, (void *)mp);
- rte_errno = ENOMEM;
- return NULL;
- }
- DRV_LOG(DEBUG, "port %u mempool %p area start=%p end=%p size=%zu",
- dev->data->port_id, (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- /* Save original addresses for exact MR lookup. */
- mr->start = start;
- mr->end = end;
- /* Round start and end to page boundary if found in memory segments. */
- for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) {
- uintptr_t addr = (uintptr_t)ms[i].addr;
- size_t len = ms[i].len;
- unsigned int align = ms[i].hugepage_sz;
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
- if ((start > addr) && (start < addr + len))
- start = RTE_ALIGN_FLOOR(start, align);
- if ((end > addr) && (end < addr + len))
- end = RTE_ALIGN_CEIL(end, align);
- }
DRV_LOG(DEBUG,
- "port %u mempool %p using start=%p end=%p size=%zu for memory"
- " region",
- dev->data->port_id, (void *)mp, (void *)start, (void *)end,
- (size_t)(end - start));
- mr->mr = ibv_reg_mr(priv->pd, (void *)start, end - start,
- IBV_ACCESS_LOCAL_WRITE);
- if (!mr->mr) {
- rte_errno = ENOMEM;
- return NULL;
- }
- mr->mp = mp;
- mr->lkey = rte_cpu_to_be_32(mr->mr->lkey);
- rte_atomic32_inc(&mr->refcnt);
- DRV_LOG(DEBUG, "port %u new memory Region %p refcnt: %d",
- dev->data->port_id, (void *)mr, rte_atomic32_read(&mr->refcnt));
- LIST_INSERT_HEAD(&priv->mr, mr, next);
- return mr;
+ "port %u not found in txq->mr_cache[], last-hit=%u, head=%u",
+ PORT_ID(txq_ctrl->priv), txq->mr_ctrl.mru, txq->mr_ctrl.head);
+ return mlx5_mr_mb2mr_bh(ETH_DEV(txq_ctrl->priv), &txq->mr_ctrl, addr);
+}
+
+/* Called by mr_update_mempool() when iterating the memory chunks. */
+static void
+mr_update_mempool_cb(struct rte_mempool *mp __rte_unused,
+ void *opaque, struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx __rte_unused)
+{
+ struct mr_update_mempool_data *data = opaque;
+
+ DRV_LOG(DEBUG, "port %u adding chunk[%u] of %s",
+ data->dev->data->port_id, mem_idx, mp->name);
+ data->tbl_sz =
+ mlx5_mr_update_addr(data->dev, data->lkp_tbl, data->tbl_sz,
+ (uintptr_t)memhdr->addr);
}
/**
- * Search the memory region object in the memory region list.
+ * Incrementally update LKEY lookup table for a specific Memory Pool from
+ * registered Memory Regions.
*
* @param dev
* Pointer to Ethernet device.
- * @param mp
- * Pointer to the memory pool to register.
+ * @param[out] lkp_tbl
+ * Pointer to lookup table to fill. The size of array must be at least
+ * (priv->static_mr_n + 1).
+ * @param n
+ * Size of lookup table.
+ * @param[in] mp
+ * Pointer to Memory Pool.
*
* @return
- * The memory region on success.
+ * Size of the updated lookup table.
*/
-struct mlx5_mr *
-mlx5_mr_get(struct rte_eth_dev *dev, struct rte_mempool *mp)
+int
+mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_cache *lkp_tbl,
+ uint16_t n, struct rte_mempool *mp)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr;
+ struct mr_update_mempool_data data = {
+ .dev = dev,
+ .lkp_tbl = lkp_tbl,
+ .tbl_sz = n
+ };
- assert(mp);
- if (LIST_EMPTY(&priv->mr))
- return NULL;
- LIST_FOREACH(mr, &priv->mr, next) {
- if (mr->mp == mp) {
- rte_atomic32_inc(&mr->refcnt);
- DRV_LOG(DEBUG, "port %u memory region %p refcnt: %d",
- dev->data->port_id, (void *)mr,
- rte_atomic32_read(&mr->refcnt));
- return mr;
- }
- }
- return NULL;
+ rte_mempool_mem_iter(mp, mr_update_mempool_cb, &data);
+ return data.tbl_sz;
+}
+
+/* Called by qsort() to compare MR entries. */
+static int
+mr_comp_addr(const void *m1, const void *m2)
+{
+ const struct mlx5_mr *mi1 = m1;
+ const struct mlx5_mr *mi2 = m2;
+
+ if (mi1->memseg->addr < mi2->memseg->addr)
+ return -1;
+ else if (mi1->memseg->addr > mi2->memseg->addr)
+ return 1;
+ else
+ return 0;
}
/**
- * Release the memory region object.
+ * Register entire physical memory to Verbs.
*
- * @param mr
- * Pointer to memory region to release.
+ * @param dev
+ * Pointer to Ethernet device.
*
* @return
- * 1 while a reference on it exists, 0 when freed.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_mr_release(struct mlx5_mr *mr)
+mlx5_mr_register_memseg(struct rte_eth_dev *dev)
{
- assert(mr);
- DRV_LOG(DEBUG, "memory region %p refcnt: %d", (void *)mr,
- rte_atomic32_read(&mr->refcnt));
- if (rte_atomic32_dec_and_test(&mr->refcnt)) {
- claim_zero(ibv_dereg_mr(mr->mr));
- LIST_REMOVE(mr, next);
- rte_free(mr);
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+ struct mlx5_mr *mr;
+ struct mlx5_mr_cache *mr_cache;
+ unsigned int i;
+
+ if (priv->mr_n != 0)
return 0;
+ /* Count the existing memsegs in the system. */
+ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i)
+ ++priv->mr_n;
+ priv->mr = rte_calloc(__func__, priv->mr_n, sizeof(*mr), 0);
+ if (priv->mr == NULL) {
+ DRV_LOG(ERR,
+ "port %u cannot allocate memory for array of static MR",
+ dev->data->port_id);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ priv->mr_cache = rte_calloc(__func__, MR_TABLE_SZ(priv->mr_n),
+ sizeof(*mr_cache), 0);
+ if (priv->mr_cache == NULL) {
+ DRV_LOG(ERR,
+ "port %u cannot allocate memory for array of MR cache",
+ dev->data->port_id);
+ rte_free(priv->mr);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
- return 1;
+ for (i = 0; i < priv->mr_n; ++i) {
+ mr = &(*priv->mr)[i];
+ mr->memseg = &ms[i];
+ mr->ibv_mr = ibv_reg_mr(priv->pd,
+ mr->memseg->addr, mr->memseg->len,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (mr->ibv_mr == NULL) {
+ rte_dump_physmem_layout(stderr);
+ DRV_LOG(ERR, "port %u cannot register memseg[%u]",
+ dev->data->port_id, i);
+ goto error;
+ }
+ }
+ /* Sort by virtual address. */
+ qsort(*priv->mr, priv->mr_n, sizeof(struct mlx5_mr), mr_comp_addr);
+ /* First entry must be NULL for comparison. */
+ (*priv->mr_cache)[0] = (struct mlx5_mr_cache) {
+ .lkey = UINT32_MAX,
+ };
+ /* Compile global all-inclusive MR cache table. */
+ for (i = 0; i < priv->mr_n; ++i) {
+ mr = &(*priv->mr)[i];
+ mr_cache = &(*priv->mr_cache)[i + 1];
+ /* Paranoid, mr[] must be sorted. */
+ assert(i == 0 || mr->memseg->addr > (mr - 1)->memseg->addr);
+ *mr_cache = (struct mlx5_mr_cache) {
+ .start = (uintptr_t)mr->memseg->addr,
+ .end = (uintptr_t)mr->memseg->addr + mr->memseg->len,
+ .lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey)
+ };
+ }
+ return 0;
+error:
+ for (i = 0; i < priv->mr_n; ++i) {
+ mr = &(*priv->mr)[i];
+ if (mr->ibv_mr != NULL)
+ ibv_dereg_mr(mr->ibv_mr);
+ }
+ rte_free(priv->mr);
+ rte_free(priv->mr_cache);
+ rte_errno = ENOMEM;
+ return -rte_errno;
}
/**
- * Verify the flow list is empty
+ * Deregister all Memory Regions.
*
* @param dev
* Pointer to Ethernet device.
- *
- * @return
- * The number of object not released.
*/
-int
-mlx5_mr_verify(struct rte_eth_dev *dev)
+void
+mlx5_mr_deregister_memseg(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- int ret = 0;
- struct mlx5_mr *mr;
+ unsigned int i;
+
+ if (priv->mr_n == 0)
+ return;
+ for (i = 0; i < priv->mr_n; ++i) {
+ struct mlx5_mr *mr;
- LIST_FOREACH(mr, &priv->mr, next) {
- DRV_LOG(DEBUG, "port %u memory region %p still referenced",
- dev->data->port_id, (void *)mr);
- ++ret;
+ mr = &(*priv->mr)[i];
+ /* Physical memory can't be changed dynamically. */
+ assert(mr->memseg != NULL);
+ assert(mr->ibv_mr != NULL);
+ ibv_dereg_mr(mr->ibv_mr);
}
- return ret;
+ rte_free(priv->mr);
+ rte_free(priv->mr_cache);
+ priv->mr = NULL;
+ priv->mr_cache = NULL;
+ priv->mr_n = 0;
}
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index dcc5a87b..7161825a 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -595,16 +595,6 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
goto error;
}
tmpl->rxq_ctrl = rxq_ctrl;
- /* Use the entire RX mempool as the memory region. */
- tmpl->mr = mlx5_mr_get(dev, rxq_data->mp);
- if (!tmpl->mr) {
- tmpl->mr = mlx5_mr_new(dev, rxq_data->mp);
- if (!tmpl->mr) {
- DRV_LOG(ERR, "port %u: memeroy region creation failure",
- dev->data->port_id);
- goto error;
- }
- }
if (rxq_ctrl->irq) {
tmpl->channel = ibv_create_comp_channel(priv->ctx);
if (!tmpl->channel) {
@@ -737,14 +727,14 @@ mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
for (i = 0; (i != (unsigned int)(1 << rxq_data->elts_n)); ++i) {
struct rte_mbuf *buf = (*rxq_data->elts)[i];
volatile struct mlx5_wqe_data_seg *scat = &(*rxq_data->wqes)[i];
+ uintptr_t addr = rte_pktmbuf_mtod(buf, uintptr_t);
/* scat->addr must be able to store a pointer. */
assert(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
- .addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf,
- uintptr_t)),
+ .addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(DATA_LEN(buf)),
- .lkey = tmpl->mr->lkey,
+ .lkey = mlx5_rx_mb2mr(rxq_data, buf)
};
}
rxq_data->rq_db = rwq.dbrec;
@@ -780,8 +770,6 @@ error:
claim_zero(ibv_destroy_cq(tmpl->cq));
if (tmpl->channel)
claim_zero(ibv_destroy_comp_channel(tmpl->channel));
- if (tmpl->mr)
- mlx5_mr_release(tmpl->mr);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
rte_errno = ret; /* Restore rte_errno. */
return NULL;
@@ -811,7 +799,6 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
return NULL;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
if (rxq_ctrl->ibv) {
- mlx5_mr_get(dev, rxq_data->mp);
rte_atomic32_inc(&rxq_ctrl->ibv->refcnt);
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
dev->data->port_id, rxq_ctrl->idx,
@@ -832,15 +819,9 @@ mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx)
int
mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv)
{
- int ret;
-
assert(rxq_ibv);
assert(rxq_ibv->wq);
assert(rxq_ibv->cq);
- assert(rxq_ibv->mr);
- ret = mlx5_mr_release(rxq_ibv->mr);
- if (!ret)
- rxq_ibv->mr = NULL;
DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d",
PORT_ID(rxq_ibv->rxq_ctrl->priv),
rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt));
@@ -918,10 +899,12 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
const uint16_t desc_n =
desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ const unsigned int mr_n = MR_TABLE_SZ(priv->mr_n);
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
- desc_n * sizeof(struct rte_mbuf *),
+ desc_n * sizeof(struct rte_mbuf *) +
+ mr_n * sizeof(struct mlx5_mr_cache),
0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
@@ -1019,8 +1002,17 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->rxq.mp = mp;
tmpl->rxq.stats.idx = idx;
tmpl->rxq.elts_n = log2above(desc);
+ tmpl->rxq.rq_repl_thresh =
+ MLX5_VPMD_RXQ_RPLNSH_THRESH(1 << tmpl->rxq.elts_n);
tmpl->rxq.elts =
(struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1);
+ tmpl->rxq.mr_ctrl.cache_bh =
+ (struct mlx5_mr_cache (*)[mr_n])&(*tmpl->rxq.elts)[desc_n];
+ tmpl->rxq.mr_ctrl.bh_n =
+ mlx5_mr_update_mp(dev, *tmpl->rxq.mr_ctrl.cache_bh,
+ tmpl->rxq.mr_ctrl.bh_n, mp);
+ DRV_LOG(DEBUG, "Rx MR lookup table: %u entires built",
+ MR_N(tmpl->rxq.mr_ctrl.bh_n));
tmpl->idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id,
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 1bbce3b7..d95c4bff 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1920,6 +1920,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
* changes.
*/
wqe->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(rep, uintptr_t));
+ /* If there's only one MR, no need to replace LKEY in WQEs. */
+ if (unlikely(!IS_SINGLE_MR(rxq->mr_ctrl.bh_n)))
+ wqe->lkey = mlx5_rx_mb2mr(rxq, rep);
if (len > DATA_LEN(seg)) {
len -= DATA_LEN(seg);
++NB_SEGS(pkt);
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index dac3b39f..7e811c10 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -82,17 +82,37 @@ struct mlx5_txq_stats {
struct priv;
-/* Memory region queue object. */
+/* Memory Region object. */
struct mlx5_mr {
- LIST_ENTRY(mlx5_mr) next; /**< Pointer to the next element. */
- rte_atomic32_t refcnt; /*<< Reference counter. */
- uint32_t lkey; /*<< rte_cpu_to_be_32(mr->lkey) */
- uintptr_t start; /* Start address of MR */
- uintptr_t end; /* End address of MR */
- struct ibv_mr *mr; /*<< Memory Region. */
- struct rte_mempool *mp; /*<< Memory Pool. */
+ const struct rte_memseg *memseg;
+ struct ibv_mr *ibv_mr; /* Verbs Memory Region. */
};
+/* Cache entry for Memory Region. */
+struct mlx5_mr_cache {
+ uintptr_t start; /* Start address of MR. */
+ uintptr_t end; /* End address of MR. */
+ uint32_t lkey; /* rte_cpu_to_be_32(ibv_mr->lkey). */
+} __rte_packed;
+
+/* Per-queue MR control descriptor. */
+struct mlx5_mr_ctrl {
+ uint16_t bh_n; /* Size of MR cache table for bottom-half. */
+ uint16_t mru; /* Index of last hit entry. */
+ uint16_t head; /* Index of the oldest entry. */
+ struct mlx5_mr_cache cache[MLX5_MR_CACHE_N]; /* MR cache. */
+ struct mlx5_mr_cache (*cache_bh)[]; /* MR cache for bottom-half. */
+} __rte_packed;
+
+/* MR table size including padding at index 0. */
+#define MR_TABLE_SZ(n) ((n) + MLX5_MR_LOOKUP_TABLE_PAD)
+
+/* Actual table size excluding padding at index 0. */
+#define MR_N(n) ((n) - MLX5_MR_LOOKUP_TABLE_PAD)
+
+/* Whether there's only one entry in MR lookup table. */
+#define IS_SINGLE_MR(n) (MR_N(n) <= 1)
+
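A short worked example of the padding arithmetic these macros encode (values are illustrative):

    /*
     * MR_TABLE_SZ(3) == 4  -> 3 memsegs plus 1 sentinel slot at index 0
     * MR_N(4)        == 3  -> strip the sentinel to get the real entry count
     * IS_SINGLE_MR(2)      -> MR_N(2) == 1, a single MR: the Rx path can
     *                         skip rewriting LKEYs in the WQEs entirely
     */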
/* Compressed CQE context. */
struct rxq_zip {
uint16_t ai; /* Array index. */
@@ -118,9 +138,11 @@ struct mlx5_rxq_data {
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;
- uint16_t rq_ci;
- uint16_t rq_pi;
- uint16_t cq_ci;
+ uint32_t rq_ci;
+ uint32_t rq_pi;
+ uint32_t cq_ci;
+ uint16_t rq_repl_thresh; /* Threshold for buffer replenishment. */
+ struct mlx5_mr_ctrl mr_ctrl;
volatile struct mlx5_wqe_data_seg(*wqes)[];
volatile struct mlx5_cqe(*cqes)[];
struct rxq_zip zip; /* Compressed context. */
@@ -142,7 +164,6 @@ struct mlx5_rxq_ibv {
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_wq *wq; /* Work Queue. */
struct ibv_comp_channel *channel;
- struct mlx5_mr *mr; /* Memory Region (for mp). */
};
/* RX queue control descriptor. */
@@ -200,15 +221,14 @@ struct mlx5_txq_data {
uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
- uint16_t mr_cache_idx; /* Index of last hit entry. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
uint32_t flags; /* Flags for Tx Queue. */
+ struct mlx5_mr_ctrl mr_ctrl;
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register remapped. */
- struct mlx5_mr *mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MR translation table. */
struct rte_mbuf *(*elts)[]; /* TX elements. */
struct mlx5_txq_stats stats; /* TX queue counters. */
} __rte_cache_aligned;
@@ -337,9 +357,10 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
/* mlx5_mr.c */
-void mlx5_mp2mr_iter(struct rte_mempool *mp, void *arg);
-struct mlx5_mr *mlx5_txq_mp2mr_reg(struct mlx5_txq_data *txq,
- struct rte_mempool *mp, unsigned int idx);
+int mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_cache *lkp_tbl,
+ uint16_t n, struct rte_mempool *mp);
+uint32_t mlx5_rx_mb2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+uint32_t mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
#ifndef NDEBUG
/**
@@ -527,77 +548,102 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
}
/**
- * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which
- * the cloned mbuf is allocated is returned instead.
+ * Look up LKEY from given lookup table by linear search. Firstly look up the
+ * last-hit entry. If miss, the entire array is searched. If found, update the
+ * last-hit index and return LKEY.
*
- * @param buf
- * Pointer to mbuf.
+ * @param lkp_tbl
+ * Pointer to lookup table.
+ * @param[in,out] cached_idx
+ * Pointer to last-hit index.
+ * @param n
+ * Size of lookup table.
+ * @param addr
+ * Search key.
*
* @return
- * Memory pool where data is located for given mbuf.
+ * Searched LKEY on success, UINT32_MAX on no match.
*/
-static struct rte_mempool *
-mlx5_tx_mb2mp(struct rte_mbuf *buf)
+static __rte_always_inline uint32_t
+mlx5_mr_lookup_cache(struct mlx5_mr_cache *lkp_tbl, uint16_t *cached_idx,
+ uint16_t n, uintptr_t addr)
{
- if (unlikely(RTE_MBUF_INDIRECT(buf)))
- return rte_mbuf_from_indirect(buf)->pool;
- return buf->pool;
+ uint16_t idx;
+
+ if (likely(addr >= lkp_tbl[*cached_idx].start &&
+ addr < lkp_tbl[*cached_idx].end))
+ return lkp_tbl[*cached_idx].lkey;
+ for (idx = 0; idx < n && lkp_tbl[idx].start != 0; ++idx) {
+ if (addr >= lkp_tbl[idx].start &&
+ addr < lkp_tbl[idx].end) {
+ /* Found. */
+ *cached_idx = idx;
+ return lkp_tbl[idx].lkey;
+ }
+ }
+ return UINT32_MAX;
}
/**
- * Get Memory Region (MR) <-> rte_mbuf association from txq->mp2mr[].
- * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
- * remove an entry first.
+ * Query LKEY from address for Rx.
+ *
+ * @param rxq
+ * Pointer to Rx queue structure.
+ * @param addr
+ * Address to search.
+ *
+ * @return
+ * LKEY on success.
+ */
+static __rte_always_inline uint32_t
+mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr)
+{
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(rxq->mr_ctrl.cache,
+ &rxq->mr_ctrl.mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ DEBUG("No found in rxq->mr_cache[], last-hit = %u, head = %u)",
+ rxq->mr_ctrl.mru, rxq->mr_ctrl.head);
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx5_rx_mb2mr_bh(rxq, addr);
+}
+
+#define mlx5_rx_mb2mr(rxq, mb) mlx5_rx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
+/**
+ * Query LKEY from address for Tx.
*
* @param txq
- * Pointer to TX queue structure.
- * @param[in] mp
- * Memory Pool for which a Memory Region lkey must be returned.
+ * Pointer to Tx queue structure.
+ * @param addr
+ * Address to search.
*
* @return
- * mr->lkey on success, (uint32_t)-1 on failure.
+ * LKEY on success.
*/
static __rte_always_inline uint32_t
-mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
{
- uint16_t i = txq->mr_cache_idx;
- uintptr_t addr = rte_pktmbuf_mtod(mb, uintptr_t);
- struct mlx5_mr *mr;
-
- assert(i < RTE_DIM(txq->mp2mr));
- if (likely(txq->mp2mr[i]->start <= addr && txq->mp2mr[i]->end > addr))
- return txq->mp2mr[i]->lkey;
- for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) {
- if (unlikely(txq->mp2mr[i] == NULL ||
- txq->mp2mr[i]->mr == NULL)) {
- /* Unknown MP, add a new MR for it. */
- break;
- }
- if (txq->mp2mr[i]->start <= addr &&
- txq->mp2mr[i]->end > addr) {
- assert(txq->mp2mr[i]->lkey != (uint32_t)-1);
- txq->mr_cache_idx = i;
- return txq->mp2mr[i]->lkey;
- }
- }
- mr = mlx5_txq_mp2mr_reg(txq, mlx5_tx_mb2mp(mb), i);
- /*
- * Request the reference to use in this queue, the original one is
- * kept by the control plane.
- */
- if (mr) {
- rte_atomic32_inc(&mr->refcnt);
- txq->mr_cache_idx = i >= RTE_DIM(txq->mp2mr) ? i - 1 : i;
- return mr->lkey;
- } else {
- struct rte_mempool *mp = mlx5_tx_mb2mp(mb);
-
- DRV_LOG(WARNING, "failed to register mempool 0x%p(%s)",
- (void *)mp, mp->name);
- }
- return (uint32_t)-1;
+ uint32_t lkey;
+
+ /* Linear search on MR cache array. */
+ lkey = mlx5_mr_lookup_cache(txq->mr_ctrl.cache,
+ &txq->mr_ctrl.mru,
+ MLX5_MR_CACHE_N, addr);
+ if (likely(lkey != UINT32_MAX))
+ return lkey;
+ DEBUG("No found in txq->mr_cache[], last-hit = %u, head = %u)",
+ txq->mr_ctrl.mru, txq->mr_ctrl.head);
+ /* Take slower bottom-half (binary search) on miss. */
+ return mlx5_tx_mb2mr_bh(txq, addr);
}
+#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+
/**
* Ring TX queue doorbell and flush the update if requested.
*
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 982b8f1f..12465b43 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -316,7 +316,7 @@ mlx5_check_vec_tx_support(struct rte_eth_dev *dev)
struct priv *priv = dev->data->dev_private;
if (!priv->tx_vec_en ||
- priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
+ priv->txqs_n > (unsigned int)priv->txqs_vec ||
priv->mps != MLX5_MPW_ENHANCED ||
priv->tso)
return -ENOTSUP;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h
index d504e2ae..750559b8 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.h
@@ -115,9 +115,13 @@ mlx5_rx_replenish_bulk_mbuf(struct mlx5_rxq_data *rxq, uint16_t n)
rxq->stats.rx_nombuf += n;
return;
}
- for (i = 0; i < n; ++i)
+ for (i = 0; i < n; ++i) {
wq[i].addr = rte_cpu_to_be_64((uintptr_t)elts[i]->buf_addr +
RTE_PKTMBUF_HEADROOM);
+ /* If there's only one MR, no need to replace LKEY in WQEs. */
+ if (unlikely(!IS_SINGLE_MR(rxq->mr_ctrl.bh_n)))
+ wq[i].lkey = mlx5_rx_mb2mr(rxq, elts[i]);
+ }
rxq->rq_ci += n;
/* Prevent overflowing into consumed mbufs. */
elts_idx = rxq->rq_ci & q_mask;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index e748615e..ae37c2bd 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -756,7 +756,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+ if (repl_n >= rxq->rq_repl_thresh)
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
index 7e8c9b88..866a5e9b 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_sse.h
@@ -737,7 +737,7 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
* N - (rq_ci - rq_pi) := # of buffers consumed (to be replenished).
*/
repl_n = q_n - (rxq->rq_ci - rxq->rq_pi);
- if (repl_n >= MLX5_VPMD_RXQ_RPLNSH_THRESH(q_n))
+ if (repl_n >= rxq->rq_repl_thresh)
mlx5_rx_replenish_bulk_mbuf(rxq, repl_n);
/* See if there're unreturned mbufs from compressed CQE. */
rcvd_pkt = rxq->cq_ci - rxq->rq_pi;
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index 345ed707..e880d24c 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -356,10 +356,11 @@ int
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct priv *priv = dev->data->dev_private;
- struct rte_eth_stats tmp = {0};
+ struct rte_eth_stats tmp;
unsigned int i;
unsigned int idx;
+ memset(&tmp, 0, sizeof(tmp));
/* Add software counters. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index 9a1d6f95..e6a29cb7 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -74,17 +74,10 @@ mlx5_txq_start(struct rte_eth_dev *dev)
int ret;
for (i = 0; i != priv->txqs_n; ++i) {
- unsigned int idx = 0;
- struct mlx5_mr *mr;
struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- LIST_FOREACH(mr, &priv->mr, next) {
- mlx5_txq_mp2mr_reg(&txq_ctrl->txq, mr->mp, idx++);
- if (idx == MLX5_PMD_TX_MP_CACHE)
- break;
- }
txq_alloc_elts(txq_ctrl);
txq_ctrl->ibv = mlx5_txq_ibv_new(dev, i);
if (!txq_ctrl->ibv) {
@@ -177,7 +170,6 @@ int
mlx5_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr = NULL;
int ret;
DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
@@ -187,7 +179,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
dev->data->port_id, strerror(rte_errno));
goto error;
}
- rte_mempool_walk(mlx5_mp2mr_iter, priv);
ret = mlx5_txq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -229,8 +220,6 @@ error:
ret = rte_errno; /* Save rte_errno before cleanup. */
/* Rollback. */
dev->data->dev_started = 0;
- for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- mlx5_mr_release(mr);
mlx5_flow_stop(dev, &priv->flows);
mlx5_traffic_disable(dev);
mlx5_txq_stop(dev);
@@ -252,7 +241,6 @@ void
mlx5_dev_stop(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- struct mlx5_mr *mr;
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
@@ -267,8 +255,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
mlx5_dev_interrupt_handler_uninstall(dev);
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
- for (mr = LIST_FIRST(&priv->mr); mr; mr = LIST_FIRST(&priv->mr))
- mlx5_mr_release(mr);
mlx5_flow_delete_drop_queue(dev);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 760ac92d..2ead2177 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -339,7 +339,6 @@ mlx5_txq_ibv_new(struct rte_eth_dev *dev, uint16_t idx)
return NULL;
}
memset(&tmpl, 0, sizeof(struct mlx5_txq_ibv));
- /* MRs will be registered in mp2mr[] later. */
attr.cq = (struct ibv_cq_init_attr_ex){
.comp_mask = 0,
};
@@ -622,10 +621,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
struct mlx5_txq_ctrl *tmpl;
+ const unsigned int mr_n = MR_TABLE_SZ(priv->mr_n);
tmpl = rte_calloc_socket("TXQ", 1,
sizeof(*tmpl) +
- desc * sizeof(struct rte_mbuf *),
+ desc * sizeof(struct rte_mbuf *) +
+ mr_n * sizeof(struct mlx5_mr_cache),
0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
@@ -639,7 +640,6 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->idx = idx;
if (priv->mps == MLX5_MPW_ENHANCED)
tmpl->txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
- /* MRs will be registered in mp2mr[] later. */
DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d",
dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d",
@@ -700,6 +700,9 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
tmpl->txq.tunnel_en = 1;
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])(tmpl + 1);
+ tmpl->txq.mr_ctrl.cache_bh =
+ (struct mlx5_mr_cache (*)[mr_n])
+ &(*tmpl->txq.elts)[1 << tmpl->txq.elts_n];
tmpl->txq.stats.idx = idx;
rte_atomic32_inc(&tmpl->refcnt);
DRV_LOG(DEBUG, "port %u Tx queue %u: refcnt %d", dev->data->port_id,
@@ -728,15 +731,8 @@ mlx5_txq_get(struct rte_eth_dev *dev, uint16_t idx)
if ((*priv->txqs)[idx]) {
ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl,
txq);
- unsigned int i;
mlx5_txq_ibv_get(dev, idx);
- for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- if (ctrl->txq.mp2mr[i])
- claim_nonzero
- (mlx5_mr_get(dev,
- ctrl->txq.mp2mr[i]->mp));
- }
rte_atomic32_inc(&ctrl->refcnt);
DRV_LOG(DEBUG, "port %u Tx queue %u refcnt %d",
dev->data->port_id,
@@ -760,7 +756,6 @@ int
mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct priv *priv = dev->data->dev_private;
- unsigned int i;
struct mlx5_txq_ctrl *txq;
size_t page_size = sysconf(_SC_PAGESIZE);
@@ -771,12 +766,6 @@ mlx5_txq_release(struct rte_eth_dev *dev, uint16_t idx)
txq->idx, rte_atomic32_read(&txq->refcnt));
if (txq->ibv && !mlx5_txq_ibv_release(txq->ibv))
txq->ibv = NULL;
- for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- if (txq->txq.mp2mr[i]) {
- mlx5_mr_release(txq->txq.mp2mr[i]);
- txq->txq.mp2mr[i] = NULL;
- }
- }
if (priv->uar_base)
munmap((void *)RTE_ALIGN_FLOOR((uintptr_t)txq->txq.bf_reg,
page_size), page_size);
diff --git a/drivers/net/mrvl/mrvl_ethdev.c b/drivers/net/mrvl/mrvl_ethdev.c
index 2fc24785..ba69f1cb 100644
--- a/drivers/net/mrvl/mrvl_ethdev.c
+++ b/drivers/net/mrvl/mrvl_ethdev.c
@@ -107,8 +107,7 @@ static const char * const valid_args[] = {
static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
- MRVL_MUSDK_BPOOLS_RESERVED,
- MRVL_MUSDK_BPOOLS_RESERVED
+ [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
};
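The replacement relies on GCC's range designator so every slot is initialized no matter what PP2_NUM_PKT_PROC expands to. A self-contained illustration of the idiom (values are arbitrary):

    #include <stdio.h>

    #define N 4
    /* GNU extension: designate a whole index range in one initializer. */
    static int pool_ids[N] = { [0 ... N - 1] = -1 };

    int main(void)
    {
        for (int i = 0; i < N; i++)
            printf("pool_ids[%d] = %d\n", i, pool_ids[i]);
        return 0;
    }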
struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 8ab28dde..c3958d04 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -666,7 +666,7 @@ nfp_net_vf_read_mac(struct nfp_net_hw *hw)
uint32_t tmp;
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
- memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+ memcpy(&hw->mac_addr[0], &tmp, 4);
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
memcpy(&hw->mac_addr[4], &tmp, 2);
@@ -1236,8 +1236,10 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
ETH_TXQ_FLAGS_NOOFFLOADS,
};
- dev_info->flow_type_rss_offloads = ETH_RSS_NONFRAG_IPV4_TCP |
+ dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
ETH_RSS_NONFRAG_IPV4_UDP |
+ ETH_RSS_IPV6 |
ETH_RSS_NONFRAG_IPV6_TCP |
ETH_RSS_NONFRAG_IPV6_UDP;
@@ -1822,21 +1824,20 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
return;
/* If IPv4 and IP checksum error, fail */
- if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
+ if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
/* If neither UDP nor TCP return */
if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
!(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
return;
- if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
- mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-
- if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
+ if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
+ mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
@@ -1920,6 +1921,18 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
case NFP_NET_RSS_IPV6_EX:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
break;
+ case NFP_NET_RSS_IPV4_TCP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ case NFP_NET_RSS_IPV6_TCP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ case NFP_NET_RSS_IPV4_UDP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
+ case NFP_NET_RSS_IPV6_UDP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
+ break;
default:
mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
}
@@ -2495,14 +2508,22 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
}
if (rss_hf & ETH_RSS_IPV4)
- cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
- NFP_NET_CFG_RSS_IPV4_TCP |
- NFP_NET_CFG_RSS_IPV4_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
if (rss_hf & ETH_RSS_IPV6)
- cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
- NFP_NET_CFG_RSS_IPV6_TCP |
- NFP_NET_CFG_RSS_IPV6_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
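
The nfp RSS rework above stops bundling the TCP/UDP hash bits with the plain IPv4/IPv6 ones and translates each ETH_RSS_* flag on its own. A reduced sketch of that per-flag mapping pattern; the flag and register-bit values below are illustrative, not the NFP layout:

#include <stdint.h>

#define RSS_IPV4          (1u << 0)
#define RSS_IPV4_TCP      (1u << 1)
#define RSS_IPV4_UDP      (1u << 2)

#define CFG_RSS_IPV4      (1u << 8)
#define CFG_RSS_IPV4_TCP  (1u << 9)
#define CFG_RSS_IPV4_UDP  (1u << 10)

static uint32_t rss_hf_to_hw(uint64_t rss_hf)
{
	uint32_t ctrl = 0;

	/* each requested hash type maps to exactly one control bit */
	if (rss_hf & RSS_IPV4)
		ctrl |= CFG_RSS_IPV4;
	if (rss_hf & RSS_IPV4_TCP)
		ctrl |= CFG_RSS_IPV4_TCP;
	if (rss_hf & RSS_IPV4_UDP)
		ctrl |= CFG_RSS_IPV4_UDP;
	return ctrl;
}
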
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index 1ae0ea62..084dfe6b 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -284,6 +284,8 @@ struct nfp_net_txq {
#define PCIE_DESC_RX_UDP_CSUM_OK (1 << 1)
#define PCIE_DESC_RX_VLAN (1 << 0)
+#define PCIE_DESC_RX_L4_CSUM_OK (PCIE_DESC_RX_TCP_CSUM_OK | \
+ PCIE_DESC_RX_UDP_CSUM_OK)
struct nfp_net_rx_desc {
union {
/* Freelist descriptor */
diff --git a/drivers/net/nfp/nfp_nspu.c b/drivers/net/nfp/nfp_nspu.c
index f9089832..d4abb6c8 100644
--- a/drivers/net/nfp/nfp_nspu.c
+++ b/drivers/net/nfp/nfp_nspu.c
@@ -9,6 +9,7 @@
#include <rte_log.h>
#include <rte_byteorder.h>
+#include <rte_string_fns.h>
#include "nfp_nfpu.h"
@@ -423,7 +424,9 @@ nfp_nspu_set_bar_from_symbl(nspu_desc_t *desc, const char *symbl,
if (!sym_buf)
return -ENOMEM;
- strncpy(sym_buf, symbl, strlen(symbl));
+ memset(sym_buf, 0, desc->buf_size);
+ memcpy(sym_buf, symbl, strlen(symbl));
+
ret = nspu_command(desc, NSP_CMD_GET_SYMBOL, 1, 1, sym_buf,
NFP_SYM_DESC_LEN, strlen(symbl));
if (ret) {
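
The nfp_nspu change above replaces strncpy(), which does not NUL-terminate when the source fills the buffer, with an explicit zero-fill of the whole command buffer followed by memcpy(). A minimal standalone equivalent, assuming a hypothetical fixed buffer size:

#include <string.h>

#define SYM_BUF_SIZE 256	/* hypothetical command buffer size */

static void copy_symbol_name(char *sym_buf, const char *symbl)
{
	size_t len = strlen(symbl);

	if (len >= SYM_BUF_SIZE)	/* truncate, keep room for the NUL */
		len = SYM_BUF_SIZE - 1;
	memset(sym_buf, 0, SYM_BUF_SIZE);	/* whole buffer is zeroed */
	memcpy(sym_buf, symbl, len);
}
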
diff --git a/drivers/net/octeontx/base/octeontx_pki_var.h b/drivers/net/octeontx/base/octeontx_pki_var.h
index def6cbb9..41b44606 100644
--- a/drivers/net/octeontx/base/octeontx_pki_var.h
+++ b/drivers/net/octeontx/base/octeontx_pki_var.h
@@ -35,8 +35,17 @@
#include <rte_byteorder.h>
-#define OCTTX_PACKET_WQE_SKIP 128
-#define OCTTX_PACKET_FIRST_SKIP 240
+#define OCTTX_PACKET_WQE_SKIP 128
+#define OCTTX_PACKET_FIRST_SKIP_MAXREGVAL 496
+#define OCTTX_PACKET_FIRST_SKIP_MAXLEN 512
+#define OCTTX_PACKET_FIRST_SKIP_ADJUST(x) \
+ (RTE_MIN(x, OCTTX_PACKET_FIRST_SKIP_MAXREGVAL))
+#define OCTTX_PACKET_FIRST_SKIP_SUM(p) \
+ (OCTTX_PACKET_WQE_SKIP \
+ + rte_pktmbuf_priv_size(p) \
+ + RTE_PKTMBUF_HEADROOM)
+#define OCTTX_PACKET_FIRST_SKIP(p) \
+ OCTTX_PACKET_FIRST_SKIP_ADJUST(OCTTX_PACKET_FIRST_SKIP_SUM(p))
#define OCTTX_PACKET_LATER_SKIP 128
/* WQE descriptor */
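
The octeontx header above turns the hard-coded first-skip of 240 into a per-pool value: the WQE skip plus the mbuf private area plus the headroom, clamped to the largest value the register accepts. A sketch of that computation with hypothetical sizes; 128 B WQE skip, 128 B headroom and a 64 B private area give 320, while a 512 B private area would be clamped to 496:

#include <stdint.h>

#define WQE_SKIP            128
#define FIRST_SKIP_MAXREG   496

static inline uint32_t first_skip(uint32_t priv_size, uint32_t headroom)
{
	uint32_t sum = WQE_SKIP + priv_size + headroom;

	/* never program more than the register can hold */
	return sum < FIRST_SKIP_MAXREG ? sum : FIRST_SKIP_MAXREG;
}
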
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 13dfbbc0..049bc32b 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -892,10 +892,11 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
pktbuf_conf.mmask.f_cache_mode = 1;
pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP;
- pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP;
+ pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP(mb_pool);
pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP;
pktbuf_conf.mbuff_size = (mb_pool->elt_size -
RTE_PKTMBUF_HEADROOM -
+ rte_pktmbuf_priv_size(mb_pool) -
sizeof(struct rte_mbuf));
pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT;
@@ -1263,15 +1264,8 @@ octeontx_probe(struct rte_vdev_device *dev)
res = -EINVAL;
goto parse_error;
}
- if (pnum > qnum) {
- /*
- * We don't poll on event ports
- * that do not have any queues assigned.
- */
- pnum = qnum;
- PMD_INIT_LOG(INFO,
- "reducing number of active event ports to %d", pnum);
- }
+
+	/* Enable all available queues */
for (i = 0; i < qnum; i++) {
res = rte_event_queue_setup(evdev, i, NULL);
if (res < 0) {
@@ -1281,6 +1275,7 @@ octeontx_probe(struct rte_vdev_device *dev)
}
}
+	/* Enable all available ports */
for (i = 0; i < pnum; i++) {
res = rte_event_port_setup(evdev, i, NULL);
if (res < 0) {
@@ -1289,6 +1284,14 @@ octeontx_probe(struct rte_vdev_device *dev)
i, res);
goto parse_error;
}
+ }
+
+ /*
+	 * Do 1:1 links for ports & queues: each queue is linked to its
+	 * own port. If there are more ports than queues, the extra ports
+	 * won't be linked to any queue.
+ */
+ for (i = 0; i < qnum; i++) {
/* Link one queue to one event port */
qlist = i;
res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
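
The probe rework above sets up every available queue and port and then links them 1:1, leaving any surplus ports unlinked. A reduced sketch of the linking loop using the eventdev API, with error handling trimmed to a bare return:

#include <rte_eventdev.h>

static int link_queues_1_to_1(uint8_t evdev, uint8_t qnum)
{
	uint8_t i, qlist;
	int res;

	for (i = 0; i < qnum; i++) {
		qlist = i;
		/* one queue per port, default priority */
		res = rte_event_port_link(evdev, i, &qlist, NULL, 1);
		if (res != 1)
			return res < 0 ? res : -1;
	}
	return 0;
}
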
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index c97d5b35..e6f917f7 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -91,6 +91,7 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
count = 0;
+ rte_io_wmb();
while (count < nb_pkts) {
res = __octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va,
dq->fc_status_va,
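
The added rte_io_wmb() above ensures the stores that build the command words are visible to the device before the I/O writes issued inside __octeontx_xmit_pkts(). A stripped-down illustration of the pattern, with a hypothetical one-word command line and doorbell register rather than the real descriptor layout:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_io.h>

static void post_tx(uint64_t *lmt_line, volatile void *ioreg, uint64_t desc)
{
	lmt_line[0] = desc;		/* fill the command in normal memory */
	rte_io_wmb();			/* order the fill before the doorbell */
	rte_write64_relaxed(1, ioreg);	/* ring the doorbell */
}
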
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 52c2f0ec..a5dde7a6 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -449,6 +449,7 @@ u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_HW_INFO_CHANGE(p_hwfn, change) nothing
#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 092606be..ca3bb178 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -2334,6 +2334,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
@@ -2366,6 +2367,25 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
ecore_fill_load_req_params(&load_req_params,
p_params->p_drv_load_params);
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
@@ -4096,6 +4116,13 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+	/* Workaround for an MFW issue where PF FLR does not clean up
+ * IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
/* Check if mdump logs/data are present and update the epoch value */
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 61e36a43..367fe5ee 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -6,6 +6,8 @@
* See LICENSE.qede_pmd for copyright and licensing details.
*/
+#include <rte_string_fns.h>
+
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
@@ -1108,9 +1110,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
p_aeu->bit_name,
num);
else
- OSAL_STRNCPY(bit_name,
- p_aeu->bit_name,
- 30);
+ strlcpy(bit_name,
+ p_aeu->bit_name,
+ sizeof(bit_name));
/* We now need to pass bitmask in its
* correct position.
@@ -2679,3 +2681,35 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+ int i;
+
+ /* Do not reorder the following cleanup sequence */
+ /* Ack all attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);
+
+ /* Clear driver attention */
+ ecore_wr(p_hwfn, p_dpc_ptt,
+ ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);
+
+ /* Clear per-PF IGU registers to restore them as if the IGU
+ * was reset for this PF
+ */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+
+	/* Execute IGU cleanup */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);
+
+ /* Clear Stats */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);
+
+ for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
+}
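
The ecore_int change above swaps OSAL_STRNCPY with a hard-coded length for strlcpy() bounded by the destination size, which always NUL-terminates. A minimal usage sketch; the 30-byte name buffer mirrors the code above, and the size is passed explicitly because array parameters decay to pointers:

#include <rte_string_fns.h>

static void copy_bit_name(char bit_name[30], const char *src)
{
	/* truncates long names but always leaves bit_name NUL-terminated */
	strlcpy(bit_name, src, 30);
}
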
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 563051c3..ebee68b5 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -258,4 +258,5 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 8edd2e96..8adb7fbe 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -1648,6 +1648,49 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
&param);
}
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
+ ecore_sp_pf_update_stag(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+ OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
+}
+
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
@@ -2033,6 +2076,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_BW_UPDATE:
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ ecore_mcp_update_stag(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_FAILURE_DETECTED:
ecore_mcp_handle_fan_failure(p_hwfn);
break;
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index be3e91f0..6b7a987c 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -523,6 +523,10 @@ union ecore_mfw_tlv_data {
struct ecore_mfw_tlv_iscsi iscsi;
};
+enum ecore_hw_info_change {
+ ECORE_HW_INFO_CHANGE_OVLAN,
+};
+
/**
* @brief - returns the link params of the hw function
*
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 81ca6634..8070d28d 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1259,6 +1259,7 @@ struct public_drv_mb {
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1777,6 +1778,8 @@ struct public_drv_mb {
#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
/* MFW supports EEE */
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+/* MFW support complete IGU cleanup upon FLR */
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP 0x00000080
/* MFW supports virtual link */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index ad15d28a..da89adeb 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -332,6 +332,21 @@
0x180820UL
#define IGU_REG_ATTN_MSG_ADDR_H \
0x180824UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define IGU_REG_ATTENTION_ACK_BITS \
+ 0x180838UL
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
+#define IGU_REG_PF_FUNCTIONAL_CLEANUP \
+ 0x181210UL
+#define IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED \
+ 0x18042cUL
+#define IGU_REG_PBA_STS_PF_SIZE 5
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
#define CAU_REG_SB_ADDR_MEMORY \
@@ -1222,3 +1237,8 @@
#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
+
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
+#define DORQ_REG_TAG1_OVRD_MODE 0x1008b4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 0x1008c8UL
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 4a5e4857..1844eea4 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -1383,7 +1383,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (qede_start_vport(qdev, eth_dev->data->mtu))
return -1;
@@ -2377,19 +2377,18 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct qede_fastpath *fp;
uint32_t max_rx_pkt_len;
uint32_t frame_size;
- uint16_t rx_buf_size;
uint16_t bufsz;
bool restart = false;
- int i;
+ int i, rc;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
+ frame_size = max_rx_pkt_len;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
- ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
+ QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
@@ -2417,14 +2416,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = frame_size;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+			/* cache align the mbuf size to simplify rx_buf_size
+ * calculation
+ */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+ rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
+ if (rc < 0)
+ return rc;
+
+ fp->rxq->rx_buf_size = rc;
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
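
With the reworked overhead macros, the MTU/frame-size conversion above becomes mtu + QEDE_MAX_ETHER_HDR_LEN, with no CRC term. A worked example, assuming QEDE_VLAN_TAG_SIZE is 4 and QEDE_LLC_SNAP_HDR_LEN is 8 (neither definition appears in this hunk): overhead = 2*4 + 8 + 2 = 18, max header = 14 + 18 = 32, so an MTU of 1500 maps to a max_rx_pkt_len of 1532.

#include <stdint.h>

/* Hypothetical constants mirroring the macros in qede_rxtx.h. */
#define EX_ETHER_HDR_LEN	14
#define EX_QEDE_ETH_OVERHEAD	((2 * 4) + 8 + 2)	/* 18 */

static inline uint32_t mtu_to_max_rx_pkt_len(uint16_t mtu)
{
	/* 1500 -> 1532 */
	return (uint32_t)mtu + EX_ETHER_HDR_LEN + EX_QEDE_ETH_OVERHEAD;
}
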
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
index 05152566..050b7376 100644
--- a/drivers/net/qede/qede_fdir.c
+++ b/drivers/net/qede/qede_fdir.c
@@ -18,6 +18,7 @@
#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
#define QEDE_FDIR_IPV4_DEF_TTL (64)
+#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)
/* Sum of length of header types of L2, L3, L4.
* L2 : ether_hdr + vlan_hdr + vxlan_hdr
@@ -340,18 +341,21 @@ qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
ip6->proto = input->flow.ipv6_flow.proto ?
input->flow.ipv6_flow.proto :
next_proto[input->flow_type];
- rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
+ rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
+ rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
IPV6_ADDR_LEN);
len += sizeof(struct ipv6_hdr);
+ params->ipv6 = true;
raw_pkt = (uint8_t *)buff;
/* UDP */
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
udp = (struct udp_hdr *)(raw_pkt + len);
- udp->src_port = input->flow.udp6_flow.dst_port;
- udp->dst_port = input->flow.udp6_flow.src_port;
+ udp->src_port = input->flow.udp6_flow.src_port;
+ udp->dst_port = input->flow.udp6_flow.dst_port;
len += sizeof(struct udp_hdr);
params->udp = true;
} else { /* TCP */
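
The fdir fix above also seeds vtc_flow, whose 32 bits pack the IPv6 version (4 bits), traffic class (8 bits) and flow label (20 bits); 0x60000000 is simply version 6 with zero traffic class and flow label. A one-line sketch of the packing:

#include <stdint.h>

static inline uint32_t ipv6_vtc_flow(uint8_t tc, uint32_t flow_label)
{
	/* ipv6_vtc_flow(0, 0) == 0x60000000, the default written above */
	return (6u << 28) | ((uint32_t)tc << 20) | (flow_label & 0xfffff);
}
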
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 95b4cd91..107f074a 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -9,6 +9,7 @@
#include <limits.h>
#include <time.h>
#include <rte_alarm.h>
+#include <rte_string_fns.h>
#include "qede_ethdev.h"
@@ -302,9 +303,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) | (params->drv_eng);
- /* TBD: strlcpy() */
- strncpy((char *)drv_version.name, (const char *)params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ strlcpy((char *)drv_version.name, (const char *)params->name,
+ sizeof(drv_version.name));
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index ffe196a6..cdb85c21 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -37,6 +37,49 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
+/* Criteria for calculating the Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of the mbuf
+ * 2) In scattered_rx mode - minimum rx_buf_size should be
+ *    (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - minimum rx_buf_size should be
+ *    (MTU + Maximum L2 Header Size + 2)
+ *    In the above cases the +2 corresponds to 2 bytes of padding in
+ *    front of the L2 header.
+ * 4) rx_buf_size should be cacheline-size aligned. Considering
+ *    criterion 1, we adjust the size down to the floor instead of the
+ *    ceiling, so that rounding never exceeds the mbuf size.
+ */
+int
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rx_buf_size;
+
+ if (dev->data->scattered_rx) {
+		/* Per HW limitation, at most ETH_RX_MAX_BUFF_PER_PKT
+		 * buffers can be used for a single packet, so make sure
+		 * the mbuf size is large enough for this.
+ */
+ if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+ (max_frame_size + QEDE_ETH_OVERHEAD)) {
+ DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+ mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+ return -EINVAL;
+ }
+
+ rx_buf_size = RTE_MAX(mbufsz,
+ (max_frame_size + QEDE_ETH_OVERHEAD) /
+ ETH_RX_MAX_BUFF_PER_PKT);
+ } else {
+ rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
+ }
+
+ /* Align to cache-line size if needed */
+ return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
+
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
@@ -87,6 +130,9 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+	/* cache align the mbuf size to simplify rx_buf_size calculation */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+
if ((rxmode->enable_scatter) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
@@ -95,13 +141,13 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
- if (dev->data->scattered_rx)
- rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
- /* Align to cache-line size if needed */
- rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ if (rc < 0) {
+ rte_free(rxq);
+ return rc;
+ }
+
+ rxq->rx_buf_size = rc;
DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
@@ -192,12 +238,13 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
void qede_rx_queue_release(void *rx_queue)
{
struct qede_rx_queue *rxq = rx_queue;
- struct qede_dev *qdev = rxq->qdev;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- PMD_INIT_FUNC_TRACE(edev);
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (rxq) {
+ qdev = rxq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_rx_queue_release_mbufs(rxq);
qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
@@ -356,12 +403,13 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
- struct qede_dev *qdev = txq->qdev;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- PMD_INIT_FUNC_TRACE(edev);
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (txq) {
+ qdev = txq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_tx_queue_release_mbufs(txq);
qdev->ops->common->chain_free(edev, &txq->tx_pbl);
rte_free(txq->sw_tx_ring);
@@ -1716,6 +1764,16 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
}
}
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+ /* We support only limited tunnel protocols */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ uint64_t temp;
+
+ temp = ol_flags & PKT_TX_TUNNEL_MASK;
+ if (temp == PKT_TX_TUNNEL_VXLAN ||
+ temp == PKT_TX_TUNNEL_MPLSINUDP)
+ break;
+ }
+
rte_errno = -ENOTSUP;
break;
}
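
qede_calc_rx_buf_size() above encodes one rule: in scattered mode the frame may span at most ETH_RX_MAX_BUFF_PER_PKT buffers, otherwise a single buffer must hold the whole frame, and either way the result is floored to a cache line so it never exceeds the mbuf. A standalone sketch with illustrative constants; the 5-buffer limit, 64 B cache line and 18 B overhead are assumptions, not values taken from the hardware headers:

#include <stdint.h>

#define MAX_BUFS_PER_PKT  5
#define CACHE_LINE        64
#define L2_OVERHEAD       18	/* 2 VLAN tags + LLC/SNAP + 2 B padding */

static int calc_rx_buf_size(int scattered, uint16_t mbufsz,
			    uint16_t max_frame)
{
	uint32_t need = (uint32_t)max_frame + L2_OVERHEAD;
	uint32_t size;

	if (scattered) {
		if ((uint32_t)mbufsz * MAX_BUFS_PER_PKT < need)
			return -1;	/* mbufs too small for the max frame */
		size = need / MAX_BUFS_PER_PKT;
		if (size < mbufsz)	/* never smaller than one mbuf */
			size = mbufsz;
	} else {
		size = need;		/* one buffer holds the whole frame */
	}
	/* floor to the cache line so we never exceed the mbuf */
	return (int)(size & ~(uint32_t)(CACHE_LINE - 1));
}
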
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index ae88206d..fe80237d 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -63,9 +63,16 @@
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
~(QEDE_FW_RX_ALIGN_END - 1))
-/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
- + (QEDE_LLC_SNAP_HDR_LEN))
+#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
+ QEDE_FW_RX_ALIGN_END)
+
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
+ * +2 is for padding in front of L2 header
+ */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN) + 2)
+
+#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
@@ -145,13 +152,14 @@
PKT_TX_TCP_CKSUM | \
PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG)
+ PKT_TX_TCP_SEG | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IPV6)
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_VXLAN | \
- PKT_TX_TUNNEL_MPLSINUDP)
+ PKT_TX_TUNNEL_MASK)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
@@ -269,6 +277,8 @@ uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
int qede_start_queues(struct rte_eth_dev *eth_dev);
void qede_stop_queues(struct rte_eth_dev *eth_dev);
+int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
index d9389dab..b2a4a28e 100644
--- a/drivers/net/sfc/base/ef10_ev.c
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -97,11 +97,10 @@ efx_mcdi_set_evq_tmr(
__in uint32_t timer_ns)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
- MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
+ MC_CMD_SET_EVQ_TMR_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_EVQ_TMR;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
@@ -147,9 +146,9 @@ efx_mcdi_init_evq(
__in boolean_t low_latency)
{
efx_mcdi_req_t req;
- uint8_t payload[
- MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
- MC_CMD_INIT_EVQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN);
efx_qword_t *dma_addr;
uint64_t addr;
int npages;
@@ -164,7 +163,6 @@ efx_mcdi_init_evq(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_EVQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
@@ -284,9 +282,9 @@ efx_mcdi_init_evq_v2(
__in uint32_t flags)
{
efx_mcdi_req_t req;
- uint8_t payload[
- MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
- MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_V2_OUT_LEN);
boolean_t interrupting;
unsigned int evq_type;
efx_qword_t *dma_addr;
@@ -301,7 +299,6 @@ efx_mcdi_init_evq_v2(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_EVQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
@@ -408,11 +405,10 @@ efx_mcdi_fini_evq(
__in uint32_t instance)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
- MC_CMD_FINI_EVQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_EVQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
@@ -624,8 +620,8 @@ efx_mcdi_driver_event(
__in efx_qword_t data)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
- MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN);
efx_rc_t rc;
req.emr_cmd = MC_CMD_DRIVER_EVENT;
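
The sfc base conversions in this patch replace a stack array sized with MAX(in, out) plus a separate memset with EFX_MCDI_DECLARE_BUF(), which yields an already-zeroed buffer at the point of declaration. The macro below is only a simplified stand-in to show the shape of the idea; the real definition in the sfc base headers may differ (for instance it may round the size up for alignment).

#include <stdint.h>

#define MCDI_BUF_MAX(a, b)	((a) > (b) ? (a) : (b))
#define MCDI_DECLARE_BUF(name, in_len, out_len)			\
	uint8_t name[MCDI_BUF_MAX(in_len, out_len)] = { 0 }

static void example(void)
{
	MCDI_DECLARE_BUF(payload, 12, 32);	/* 32 zeroed bytes */

	(void)payload;
}
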
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
index e1faf1dd..ca15c53e 100644
--- a/drivers/net/sfc/base/ef10_filter.c
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -190,11 +190,10 @@ efx_mcdi_filter_op_add(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
- MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_EXT_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN);
efx_rc_t rc;
- memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
@@ -353,11 +352,10 @@ efx_mcdi_filter_op_delete(
__inout ef10_filter_handle_t *handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FILTER_OP_EXT_IN_LEN,
- MC_CMD_FILTER_OP_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FILTER_OP_EXT_IN_LEN,
+ MC_CMD_FILTER_OP_EXT_OUT_LEN);
efx_rc_t rc;
- memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FILTER_OP;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FILTER_OP_EXT_IN_LEN;
@@ -917,13 +915,12 @@ efx_mcdi_get_parser_disp_info(
__out size_t *list_lengthp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
- MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
size_t matches_count;
size_t list_size;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
@@ -1057,12 +1054,15 @@ ef10_filter_insert_unicast(
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+ rc = efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+ if (rc != 0)
+ goto fail1;
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
if (rc != 0)
- goto fail1;
+ goto fail2;
eftp->eft_unicst_filter_count++;
EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
@@ -1070,6 +1070,8 @@ ef10_filter_insert_unicast(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
@@ -1088,11 +1090,13 @@ ef10_filter_insert_all_unicast(
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_uc_def(&spec);
+ rc = efx_filter_spec_set_uc_def(&spec);
+ if (rc != 0)
+ goto fail1;
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
if (rc != 0)
- goto fail1;
+ goto fail2;
eftp->eft_unicst_filter_count++;
EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
@@ -1100,6 +1104,8 @@ ef10_filter_insert_all_unicast(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
@@ -1141,9 +1147,21 @@ ef10_filter_insert_multicast_list(
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_eth_local(&spec,
+ rc = efx_filter_spec_set_eth_local(&spec,
EFX_FILTER_SPEC_VID_UNSPEC,
&addrs[i * EFX_MAC_ADDR_LEN]);
+ if (rc != 0) {
+ if (rollback == B_TRUE) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ } else {
+ /*
+ * Don't try to add a filter with a corrupt
+ * specification.
+ */
+ continue;
+ }
+ }
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&filter_index);
@@ -1166,8 +1184,12 @@ ef10_filter_insert_multicast_list(
eftp->eft_default_rxq);
EFX_MAC_BROADCAST_ADDR_SET(addr);
- efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
- addr);
+ rc = efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC, addr);
+ if ((rc != 0) && (rollback == B_TRUE)) {
+ /* Only stop upon failure if told to rollback */
+ goto rollback;
+ }
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&filter_index);
@@ -1215,12 +1237,14 @@ ef10_filter_insert_all_multicast(
efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags,
eftp->eft_default_rxq);
- efx_filter_spec_set_mc_def(&spec);
+ rc = efx_filter_spec_set_mc_def(&spec);
+ if (rc != 0)
+ goto fail1;
rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
&eftp->eft_mulcst_filter_indexes[0]);
if (rc != 0)
- goto fail1;
+ goto fail2;
eftp->eft_mulcst_filter_count = 1;
eftp->eft_using_all_mulcst = B_TRUE;
@@ -1231,6 +1255,8 @@ ef10_filter_insert_all_multicast(
return (0);
+fail2:
+ EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1465,7 +1491,7 @@ ef10_filter_reconfigure(
/*
* Insert or renew unicast filters.
*
- * Frimware does not perform chaining on unicast filters. As traffic is
+ * Firmware does not perform chaining on unicast filters. As traffic is
* therefore only delivered to the first matching filter, we should
* always insert the specific filter for our MAC address, to try and
* ensure we get that traffic.
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
index 8f9eb7a3..d573a81f 100644
--- a/drivers/net/sfc/base/ef10_impl.h
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -454,7 +454,7 @@ ef10_nvram_partn_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn efx_rc_t
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
index 16be3d8c..db952d3d 100644
--- a/drivers/net/sfc/base/ef10_intr.c
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -75,8 +75,8 @@ efx_mcdi_trigger_interrupt(
__in unsigned int level)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
- MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
@@ -87,7 +87,6 @@ efx_mcdi_trigger_interrupt(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
index 488633f5..831c79df 100644
--- a/drivers/net/sfc/base/ef10_mac.c
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -99,11 +99,10 @@ efx_mcdi_vadapter_set_mac(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
- MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
@@ -165,11 +164,10 @@ efx_mcdi_mtu_set(
__in uint32_t mtu)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
- MC_CMD_SET_MAC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
@@ -202,11 +200,10 @@ efx_mcdi_mtu_get(
__out size_t *mtu)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
- MC_CMD_SET_MAC_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
@@ -298,11 +295,10 @@ ef10_mac_reconfigure(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
- MC_CMD_SET_MAC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
@@ -435,7 +431,7 @@ ef10_mac_filter_default_rxq_clear(
ef10_filter_default_rxq_clear(enp);
- efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ (void) efx_filter_reconfigure(enp, epp->ep_mac_addr,
epp->ep_all_unicst, epp->ep_mulcst,
epp->ep_all_mulcst, epp->ep_brdcst,
epp->ep_mulcst_addr_list,
@@ -612,7 +608,7 @@ ef10_mac_stats_update(
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
- EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
index 58d1b0af..c92acea7 100644
--- a/drivers/net/sfc/base/ef10_nic.c
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -44,14 +44,13 @@ efx_mcdi_get_port_assignment(
__out uint32_t *portp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
- MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
@@ -89,14 +88,13 @@ efx_mcdi_get_port_modes(
__out_opt uint32_t *current_modep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
- MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PORT_MODES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
@@ -196,13 +194,12 @@ efx_mcdi_vadaptor_alloc(
__in uint32_t port_id)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
- MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
@@ -235,11 +232,10 @@ efx_mcdi_vadaptor_free(
__in uint32_t port_id)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
- MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VADAPTOR_FREE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
@@ -269,14 +265,13 @@ efx_mcdi_get_mac_address_pf(
__out_ecount_opt(6) uint8_t mac_addrp[6])
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
@@ -327,14 +322,13 @@ efx_mcdi_get_mac_address_vf(
__out_ecount_opt(6) uint8_t mac_addrp[6])
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
@@ -391,14 +385,13 @@ efx_mcdi_get_clock(
__out uint32_t *dpcpu_freqp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
- MC_CMD_GET_CLOCK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CLOCK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
@@ -450,11 +443,10 @@ efx_mcdi_get_vector_cfg(
__out_opt uint32_t *vf_nvecp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
- MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
@@ -500,8 +492,8 @@ efx_mcdi_alloc_vis(
__out uint32_t *vi_shiftp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
- MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
efx_rc_t rc;
if (vi_countp == NULL) {
@@ -509,7 +501,6 @@ efx_mcdi_alloc_vis(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_ALLOC_VIS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
@@ -592,8 +583,8 @@ efx_mcdi_alloc_piobuf(
__out efx_piobuf_handle_t *handlep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
- MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN);
efx_rc_t rc;
if (handlep == NULL) {
@@ -601,7 +592,6 @@ efx_mcdi_alloc_piobuf(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
@@ -640,11 +630,10 @@ efx_mcdi_free_piobuf(
__in efx_piobuf_handle_t handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
- MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FREE_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
@@ -675,11 +664,10 @@ efx_mcdi_link_piobuf(
__in efx_piobuf_handle_t handle)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LINK_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
@@ -710,11 +698,10 @@ efx_mcdi_unlink_piobuf(
__in uint32_t vi_index)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
@@ -767,7 +754,7 @@ fail1:
for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
- efx_mcdi_free_piobuf(enp, *handlep);
+ (void) efx_mcdi_free_piobuf(enp, *handlep);
*handlep = EFX_PIOBUF_HANDLE_INVALID;
}
enp->en_arch.ef10.ena_piobuf_count = 0;
@@ -784,7 +771,7 @@ ef10_nic_free_piobufs(
for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
- efx_mcdi_free_piobuf(enp, *handlep);
+ (void) efx_mcdi_free_piobuf(enp, *handlep);
*handlep = EFX_PIOBUF_HANDLE_INVALID;
}
enp->en_arch.ef10.ena_piobuf_count = 0;
@@ -911,11 +898,10 @@ ef10_mcdi_get_pf_count(
__out uint32_t *pf_countp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
- MC_CMD_GET_PF_COUNT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
+ MC_CMD_GET_PF_COUNT_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PF_COUNT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
@@ -1458,8 +1444,8 @@ ef10_nic_reset(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
- MC_CMD_ENTITY_RESET_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN);
efx_rc_t rc;
/* ef10_nic_reset() is called to recover from BADASSERT failures. */
@@ -1468,7 +1454,6 @@ ef10_nic_reset(
if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
goto fail2;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_ENTITY_RESET;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
index 3f9d3750..75ac015d 100644
--- a/drivers/net/sfc/base/ef10_nvram.c
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -1825,7 +1825,7 @@ ef10_nvram_partn_write_segment_tlv(
goto fail7;
/* Unlock the partition */
- ef10_nvram_partn_unlock(enp, partn, NULL);
+ (void) ef10_nvram_partn_unlock(enp, partn, NULL);
EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
@@ -1840,7 +1840,7 @@ fail5:
fail4:
EFSYS_PROBE(fail4);
- ef10_nvram_partn_unlock(enp, partn, NULL);
+ (void) ef10_nvram_partn_unlock(enp, partn, NULL);
fail3:
EFSYS_PROBE(fail3);
@@ -1994,7 +1994,7 @@ ef10_nvram_partn_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in unsigned int offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size)
{
size_t chunk;
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
index 81309f29..55403f7c 100644
--- a/drivers/net/sfc/base/ef10_phy.c
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -202,11 +202,10 @@ ef10_phy_get_link(
__out ef10_link_state_t *elsp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
- MC_CMD_GET_LINK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
@@ -277,8 +276,8 @@ ef10_phy_reconfigure(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
- MC_CMD_SET_LINK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN);
uint32_t cap_mask;
unsigned int led_mode;
unsigned int speed;
@@ -290,7 +289,6 @@ ef10_phy_reconfigure(
if (supported == B_FALSE)
goto out;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
@@ -404,12 +402,11 @@ ef10_phy_verify(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
- MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN);
uint32_t state;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
@@ -523,22 +520,34 @@ ef10_bist_poll(
unsigned long *valuesp,
__in size_t count)
{
+ /*
+	 * MCDI_CTL_SDU_LEN_MAX_V1 is large enough to cover all BIST results,
+ * whilst not wasting stack.
+ */
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX_V1);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
- MCDI_CTL_SDU_LEN_MAX)];
uint32_t value_mask = 0;
uint32_t result;
efx_rc_t rc;
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_SFT9001_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_MRSFP_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+ EFX_STATIC_ASSERT(MC_CMD_POLL_BIST_OUT_MEM_LEN <=
+ MCDI_CTL_SDU_LEN_MAX_V1);
+
_NOTE(ARGUNUSED(type))
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_POLL_BIST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
req.emr_out_buf = payload;
- req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX_V1;
efx_mcdi_execute(enp, &req);
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
index 849f674c..c5f48b6c 100644
--- a/drivers/net/sfc/base/ef10_rx.c
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -47,8 +47,8 @@ efx_mcdi_init_rxq(
__in uint32_t ps_bufsize)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
- MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_EXT_IN_LEN,
+ MC_CMD_INIT_RXQ_EXT_OUT_LEN);
int npages = EFX_RXQ_NBUFS(size);
int i;
efx_qword_t *dma_addr;
@@ -65,7 +65,6 @@ efx_mcdi_init_rxq(
else
dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_RXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
@@ -122,11 +121,10 @@ efx_mcdi_fini_rxq(
__in uint32_t instance)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
- MC_CMD_FINI_RXQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_RXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
@@ -164,8 +162,8 @@ efx_mcdi_rss_context_alloc(
__out uint32_t *rss_contextp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
- MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
uint32_t rss_context;
uint32_t context_type;
efx_rc_t rc;
@@ -187,7 +185,6 @@ efx_mcdi_rss_context_alloc(
goto fail2;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
@@ -244,8 +241,8 @@ efx_mcdi_rss_context_free(
__in uint32_t rss_context)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
- MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN);
efx_rc_t rc;
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
@@ -253,7 +250,6 @@ efx_mcdi_rss_context_free(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
@@ -288,8 +284,8 @@ efx_mcdi_rss_context_set_flags(
__in efx_rx_hash_type_t type)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
- MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN);
efx_rc_t rc;
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
@@ -297,7 +293,6 @@ efx_mcdi_rss_context_set_flags(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
@@ -344,8 +339,8 @@ efx_mcdi_rss_context_set_key(
__in size_t n)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
- MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN);
efx_rc_t rc;
if (rss_context == EF10_RSS_CONTEXT_INVALID) {
@@ -353,7 +348,6 @@ efx_mcdi_rss_context_set_key(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
@@ -401,8 +395,8 @@ efx_mcdi_rss_context_set_table(
__in size_t n)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
- MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN);
uint8_t *req_table;
int i, rc;
@@ -411,7 +405,6 @@ efx_mcdi_rss_context_set_table(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
index 211d2655..32de4a37 100644
--- a/drivers/net/sfc/base/ef10_tx.c
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -55,8 +55,8 @@ efx_mcdi_init_txq(
__in efsys_mem_t *esmp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
- MC_CMD_INIT_TXQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN);
efx_qword_t *dma_addr;
uint64_t addr;
int npages;
@@ -72,7 +72,6 @@ efx_mcdi_init_txq(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_INIT_TXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
@@ -133,11 +132,10 @@ efx_mcdi_fini_txq(
__in uint32_t instance)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
- MC_CMD_FINI_TXQ_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FINI_TXQ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
@@ -286,7 +284,7 @@ ef10_tx_qpio_enable(
fail3:
EFSYS_PROBE(fail3);
- ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
fail2:
EFSYS_PROBE(fail2);
etp->et_pio_size = 0;
@@ -304,10 +302,12 @@ ef10_tx_qpio_disable(
if (etp->et_pio_size != 0) {
/* Unlink the piobuf from this TXQ */
- ef10_nic_pio_unlink(enp, etp->et_index);
+ if (ef10_nic_pio_unlink(enp, etp->et_index) != 0)
+ return;
/* Free the sub-allocated PIO block */
- ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ (void) ef10_nic_pio_free(enp, etp->et_pio_bufnum,
+ etp->et_pio_blknum);
etp->et_pio_size = 0;
etp->et_pio_write_offset = 0;
}
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
index 53fa37ac..fc569601 100644
--- a/drivers/net/sfc/base/efx_impl.h
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -296,7 +296,6 @@ typedef struct efx_port_s {
uint32_t ep_default_adv_cap_mask;
uint32_t ep_phy_cap_mask;
boolean_t ep_mac_drain;
- boolean_t ep_mac_stats_pending;
#if EFSYS_OPT_BIST
efx_bist_type_t ep_current_bist;
#endif
@@ -556,7 +555,7 @@ efx_mcdi_nvram_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size);
__checkReturn efx_rc_t
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
index 2cd05cc8..7ad6ed2a 100644
--- a/drivers/net/sfc/base/efx_lic.c
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -340,12 +340,11 @@ efx_mcdi_fc_license_update_license(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FC_IN_LICENSE_LEN, 0);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
@@ -386,13 +385,12 @@ efx_mcdi_fc_license_get_key_stats(
__out efx_key_stats_t *eksp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
- MC_CMD_FC_OUT_LICENSE_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_FC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
@@ -711,8 +709,8 @@ efx_mcdi_licensed_app_state(
__out boolean_t *licensedp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
- MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
uint32_t app_state;
efx_rc_t rc;
@@ -724,7 +722,6 @@ efx_mcdi_licensed_app_state(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
@@ -770,12 +767,11 @@ efx_mcdi_licensing_update_licenses(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_IN_LEN, 0);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
@@ -813,13 +809,12 @@ efx_mcdi_licensing_get_key_stats(
__out efx_key_stats_t *eksp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
- MC_CMD_LICENSING_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
@@ -877,12 +872,11 @@ efx_mcdi_licensing_v3_update_licenses(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_V3_IN_LEN, 0);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
@@ -913,13 +907,12 @@ efx_mcdi_licensing_v3_report_license(
__out efx_key_stats_t *eksp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
- MC_CMD_LICENSING_V3_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN);
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LICENSING_V3;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
@@ -976,14 +969,13 @@ efx_mcdi_licensing_v3_app_state(
__out boolean_t *licensedp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
- MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN);
uint32_t app_state;
efx_rc_t rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
@@ -1034,27 +1026,15 @@ efx_mcdi_licensing_v3_get_id(
uint8_t *bufferp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
- MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
efx_rc_t rc;
req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
-
- if (bufferp == NULL) {
- /* Request id type and length only */
- req.emr_in_buf = bufferp;
- req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
- req.emr_out_buf = bufferp;
- req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
- (void) memset(payload, 0, sizeof (payload));
- } else {
- /* Request full buffer */
- req.emr_in_buf = bufferp;
- req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
- req.emr_out_buf = bufferp;
- req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
- (void) memset(bufferp, 0, req.emr_out_length);
- }
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX;
efx_mcdi_execute_quiet(enp, &req);
@@ -1071,18 +1051,10 @@ efx_mcdi_licensing_v3_get_id(
*typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
*lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
- if (bufferp == NULL) {
- /* modify length requirements to indicate to caller the extra buffering
- ** needed to read the complete output.
- */
- *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
- } else {
- /* Shift ID down to start of buffer */
- memmove(bufferp,
- bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
- *lengthp);
- memset(bufferp + (*lengthp), 0,
- MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ if (bufferp != NULL) {
+ memcpy(bufferp,
+ payload + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ MIN(buffer_size, *lengthp));
}
return (0);
diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c
index 752e7205..0cf27317 100644
--- a/drivers/net/sfc/base/efx_mac.c
+++ b/drivers/net/sfc/base/efx_mac.c
@@ -751,16 +751,9 @@ efx_mac_stats_upload(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT(emop != NULL);
- /*
- * Don't assert !ep_mac_stats_pending, because the client might
- * have failed to finalise statistics when previously stopping
- * the port.
- */
if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
goto fail1;
- epp->ep_mac_stats_pending = B_TRUE;
-
return (0);
fail1:
@@ -820,8 +813,6 @@ efx_mac_stats_update(
EFSYS_ASSERT(emop != NULL);
rc = emop->emo_stats_update(enp, esmp, essp, generationp);
- if (rc == 0)
- epp->ep_mac_stats_pending = B_FALSE;
return (rc);
}
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
index c61b943c..098e2364 100644
--- a/drivers/net/sfc/base/efx_mcdi.c
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -916,10 +916,10 @@ efx_mcdi_version(
__out_opt efx_mcdi_boot_t *statusp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
- MC_CMD_GET_VERSION_OUT_LEN),
- MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
- MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MAX(MC_CMD_GET_VERSION_IN_LEN, MC_CMD_GET_BOOT_STATUS_IN_LEN),
+ MAX(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN));
efx_word_t *ver_words;
uint16_t version[4];
uint32_t build;
@@ -928,7 +928,6 @@ efx_mcdi_version(
EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_VERSION;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
@@ -969,7 +968,6 @@ version:
goto out;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
@@ -1034,12 +1032,11 @@ efx_mcdi_get_capabilities(
__out_opt uint32_t *tso2ncp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
- MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
boolean_t v2_capable;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_CAPABILITIES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
@@ -1102,7 +1099,8 @@ efx_mcdi_do_reboot(
__in efx_nic_t *enp,
__in boolean_t after_assertion)
{
- uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN,
+ MC_CMD_REBOOT_OUT_LEN);
efx_mcdi_req_t req;
efx_rc_t rc;
@@ -1115,7 +1113,6 @@ efx_mcdi_do_reboot(
*/
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_REBOOT;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
@@ -1166,8 +1163,8 @@ efx_mcdi_read_assertion(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
- MC_CMD_GET_ASSERTS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN);
const char *reason;
unsigned int flags;
unsigned int index;
@@ -1268,11 +1265,10 @@ efx_mcdi_drv_attach(
__in boolean_t attach)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
- MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_DRV_ATTACH;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
@@ -1319,11 +1315,10 @@ efx_mcdi_get_board_cfg(
{
efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
- MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_BOARD_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
@@ -1399,11 +1394,10 @@ efx_mcdi_get_resource_limits(
__out_opt uint32_t *ntxqp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
- MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
@@ -1446,11 +1440,10 @@ efx_mcdi_get_phy_cfg(
efx_port_t *epp = &(enp->en_port);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
- MC_CMD_GET_PHY_CFG_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
@@ -1687,11 +1680,10 @@ efx_mcdi_bist_start(
__in efx_bist_type_t type)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
- MC_CMD_START_BIST_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_START_BIST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
@@ -1750,11 +1742,10 @@ efx_mcdi_log_ctrl(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
- MC_CMD_LOG_CTRL_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_LOG_CTRL;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
@@ -1799,8 +1790,8 @@ efx_mcdi_mac_stats(
__in uint16_t period_ms)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
- MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_OUT_DMA_LEN);
int clear = (action == EFX_STATS_CLEAR);
int upload = (action == EFX_STATS_UPLOAD);
int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
@@ -1808,7 +1799,6 @@ efx_mcdi_mac_stats(
int disable = (action == EFX_STATS_DISABLE);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_MAC_STATS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
@@ -1958,11 +1948,10 @@ efx_mcdi_get_function_info(
__out_opt uint32_t *vfp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
- MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
@@ -2003,11 +1992,10 @@ efx_mcdi_privilege_mask(
__out uint32_t *maskp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
- MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
@@ -2052,11 +2040,10 @@ efx_mcdi_set_workaround(
__out_opt uint32_t *flagsp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
- MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_WORKAROUND;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
@@ -2096,10 +2083,9 @@ efx_mcdi_get_workarounds(
__out_opt uint32_t *enabledp)
{
efx_mcdi_req_t req;
- uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_WORKAROUNDS_OUT_LEN, 0);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
req.emr_in_buf = NULL;
req.emr_in_length = 0;
@@ -2145,14 +2131,13 @@ efx_mcdi_get_phy_media_info(
__out_bcount(len) uint8_t *data)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
- MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
- EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE));
efx_rc_t rc;
EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
index 21727713..1f355c02 100644
--- a/drivers/net/sfc/base/efx_mcdi.h
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -400,6 +400,17 @@ efx_mcdi_phy_module_get_info(
(((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
(MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
+/*
+ * The buffer size must be a multiple of dword to ensure that MCDI works
+ * properly with Siena based boards (which use on-chip buffer). Also, it
+ * should be at minimum the size of two dwords to allow space for extended
+ * error responses if the request/response buffer sizes are smaller.
+ */
+#define EFX_MCDI_DECLARE_BUF(_name, _in_len, _out_len) \
+ uint8_t _name[P2ROUNDUP(MAX(MAX(_in_len, _out_len), \
+ (2 * sizeof (efx_dword_t))), \
+ sizeof (efx_dword_t))] = {0}
+
typedef enum efx_mcdi_feature_id_e {
EFX_MCDI_FEATURE_FW_UPDATE = 0,
EFX_MCDI_FEATURE_LINK_CONTROL,
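
The EFX_MCDI_DECLARE_BUF() macro introduced here replaces the open-coded uint8_t payload[MAX(in, out)] array plus (void) memset(payload, 0, sizeof (payload)) pattern removed throughout this series: the buffer is zero-initialised at declaration, rounded up to a whole dword and never smaller than two dwords. A minimal standalone sketch of the same idea; the MAX/P2ROUNDUP helpers and the 4-byte dword size are restated locally for illustration only (the real definitions live in the efx base headers):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the efx helpers used by the real macro. */
#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define P2ROUNDUP(x, a)	(-(-(x) & -(a)))	/* round x up to a power-of-two multiple */
#define DWORD_SIZE	4			/* stands in for sizeof (efx_dword_t) */

/* Zero-initialised buffer, at least two dwords, rounded up to a whole dword. */
#define MCDI_DECLARE_BUF(_name, _in_len, _out_len)		\
	uint8_t _name[P2ROUNDUP(MAX(MAX(_in_len, _out_len),	\
				    2 * DWORD_SIZE),		\
				DWORD_SIZE)] = {0}

int main(void)
{
	MCDI_DECLARE_BUF(payload, 10, 7);	/* max(10, 7) rounded up to 12 */

	printf("payload size = %zu, first byte = %u\n",
	       sizeof(payload), payload[0]);
	return 0;
}
```

Compiled and run, the sketch prints "payload size = 12, first byte = 0": the larger of the two lengths, rounded up to the next dword, already cleared without an explicit memset at each call site.
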
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
index 76caa744..5513ef22 100644
--- a/drivers/net/sfc/base/efx_nic.c
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -587,7 +587,7 @@ efx_nic_reset(
*/
mod_flags = enp->en_mod_flags;
mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
- EFX_MOD_VPD | EFX_MOD_MON);
+ EFX_MOD_VPD | EFX_MOD_MON);
EFSYS_ASSERT3U(mod_flags, ==, 0);
if (mod_flags != 0) {
rc = EINVAL;
@@ -612,6 +612,7 @@ efx_nic_cfg_get(
__in efx_nic_t *enp)
{
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
return (&(enp->en_nic_cfg));
}
@@ -933,13 +934,12 @@ efx_mcdi_get_loopback_modes(
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
- MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
efx_qword_t mask;
efx_qword_t modes;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
index 6ee2a71d..0191acb3 100644
--- a/drivers/net/sfc/base/efx_nvram.c
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -503,12 +503,11 @@ efx_mcdi_nvram_partitions(
__out unsigned int *npartnp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
- MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
unsigned int npartn;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
@@ -566,11 +565,10 @@ efx_mcdi_nvram_metadata(
__in size_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
- MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_METADATA;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
@@ -656,12 +654,11 @@ efx_mcdi_nvram_info(
__out_opt uint32_t *erase_sizep,
__out_opt uint32_t *write_sizep)
{
- uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
- MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN);
efx_mcdi_req_t req;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_INFO;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
@@ -717,12 +714,11 @@ efx_mcdi_nvram_update_start(
__in efx_nic_t *enp,
__in uint32_t partn)
{
- uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
- MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN);
efx_mcdi_req_t req;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN;
@@ -759,8 +755,8 @@ efx_mcdi_nvram_read(
__in uint32_t mode)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
- MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX);
efx_rc_t rc;
if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
@@ -768,7 +764,6 @@ efx_mcdi_nvram_read(
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_READ;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
@@ -814,11 +809,10 @@ efx_mcdi_nvram_erase(
__in size_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
- MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_ERASE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
@@ -854,27 +848,31 @@ efx_mcdi_nvram_write(
__in efx_nic_t *enp,
__in uint32_t partn,
__in uint32_t offset,
- __out_bcount(size) caddr_t data,
+ __in_bcount(size) caddr_t data,
__in size_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
- MCDI_CTL_SDU_LEN_MAX_V2)];
+ uint8_t *payload;
efx_rc_t rc;
size_t max_data_size;
+ size_t payload_len = enp->en_nic_cfg.enc_mcdi_max_payload_length;
- max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
- - MC_CMD_NVRAM_WRITE_IN_LEN(0);
- EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
- EFSYS_ASSERT3U(max_data_size, <,
- enp->en_nic_cfg.enc_mcdi_max_payload_length);
+ max_data_size = payload_len - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(payload_len, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <, payload_len);
if (size > max_data_size) {
rc = EINVAL;
goto fail1;
}
- (void) memset(payload, 0, sizeof (payload));
+ EFSYS_KMEM_ALLOC(enp->en_esip, payload_len, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, payload_len);
req.emr_cmd = MC_CMD_NVRAM_WRITE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
@@ -892,11 +890,16 @@ efx_mcdi_nvram_write(
if (req.emr_rc != 0) {
rc = req.emr_rc;
- goto fail2;
+ goto fail3;
}
+ EFSYS_KMEM_FREE(enp->en_esip, payload_len, payload);
+
return (0);
+fail3:
+ EFSYS_PROBE(fail3);
+ EFSYS_KMEM_FREE(enp->en_esip, payload_len, payload);
fail2:
EFSYS_PROBE(fail2);
fail1:
@@ -919,12 +922,11 @@ efx_mcdi_nvram_update_finish(
{
const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
- MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
uint32_t result = 0; /* FIXME: use MC_CMD_NVRAM_VERIFY_RC_UNKNOWN */
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN;
@@ -991,12 +993,11 @@ efx_mcdi_nvram_test(
__in uint32_t partn)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
- MC_CMD_NVRAM_TEST_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN);
int result;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_TEST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
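
In the efx_mcdi_nvram_write() hunk above, the worst-case MCDI SDU is no longer kept on the stack: the payload is sized from enc_mcdi_max_payload_length, allocated with EFSYS_KMEM_ALLOC() and released on both the success path and the new fail3 unwind label. A generic sketch of that allocate/unwind shape, with plain malloc()/free() and a stub request standing in for the EFSYS_KMEM_* hooks and the real MCDI transaction:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stub MCDI transaction; fails on demand so both unwind paths get exercised. */
static int
do_request(const uint8_t *payload, size_t len, int force_error)
{
	(void)payload;
	(void)len;
	return force_error ? EIO : 0;
}

static int
nvram_write_sketch(size_t payload_len, size_t size, int force_error)
{
	uint8_t *payload;
	int rc;

	if (size > payload_len) {		/* fail1: caller asked for too much */
		rc = EINVAL;
		goto fail1;
	}

	payload = malloc(payload_len);		/* EFSYS_KMEM_ALLOC() stand-in */
	if (payload == NULL) {			/* fail2: allocation failed */
		rc = ENOMEM;
		goto fail2;
	}
	memset(payload, 0, payload_len);

	rc = do_request(payload, size, force_error);
	if (rc != 0)				/* fail3: the request itself failed */
		goto fail3;

	free(payload);				/* EFSYS_KMEM_FREE() stand-in */
	return 0;

fail3:
	free(payload);				/* unwind only what was set up */
fail2:
fail1:
	fprintf(stderr, "nvram_write_sketch: rc=%d\n", rc);
	return rc;
}

int main(void)
{
	printf("ok=%d err=%d\n",
	    nvram_write_sketch(256, 64, 0), nvram_write_sketch(256, 64, 1));
	return 0;
}
```

The labels unwind in reverse order of setup, so each error path frees exactly what had been acquired by the time it failed, which is the single-exit style the base code uses everywhere.
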
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
index 752cd52e..af1ad981 100644
--- a/drivers/net/sfc/base/efx_phy.c
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -309,7 +309,7 @@ efx_phy_module_get_info(
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT(data != NULL);
- if ((uint32_t)offset + len > 0xff) {
+ if ((uint32_t)offset + len > 0x100) {
rc = EINVAL;
goto fail1;
}
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
index ec8a1575..7584e5a8 100644
--- a/drivers/net/sfc/base/efx_port.c
+++ b/drivers/net/sfc/base/efx_port.c
@@ -61,7 +61,7 @@ efx_port_init(
epp->ep_emop->emo_reconfigure(enp);
/* Pick up current phy capabilities */
- efx_port_poll(enp, NULL);
+ (void) efx_port_poll(enp, NULL);
/*
* Turn on the PHY if available, otherwise reset it, and
@@ -109,7 +109,6 @@ efx_port_poll(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT(emop != NULL);
- EFSYS_ASSERT(!epp->ep_mac_stats_pending);
if (link_modep == NULL)
link_modep = &ignore_link_mode;
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
index c5360c31..8fb4113c 100644
--- a/drivers/net/sfc/base/mcdi_mon.c
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -30,6 +30,7 @@
#include "efx.h"
#include "efx_impl.h"
+#include "mcdi_mon.h"
#if EFSYS_OPT_MON_MCDI
@@ -314,9 +315,15 @@ efx_mcdi_read_sensors(
__in uint32_t size)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
- MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN);
uint32_t addr_lo, addr_hi;
+ efx_rc_t rc;
+
+ if (EFSYS_MEM_SIZE(esmp) < size) {
+ rc = EINVAL;
+ goto fail1;
+ }
req.emr_cmd = MC_CMD_READ_SENSORS;
req.emr_in_buf = payload;
@@ -334,6 +341,11 @@ efx_mcdi_read_sensors(
efx_mcdi_execute(enp, &req);
return (req.emr_rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
}
static __checkReturn efx_rc_t
@@ -342,8 +354,8 @@ efx_mcdi_sensor_info_npages(
__out uint32_t *npagesp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
- MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX);
int page;
efx_rc_t rc;
@@ -386,8 +398,8 @@ efx_mcdi_sensor_info(
__in size_t npages)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
- MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX);
uint32_t page;
efx_rc_t rc;
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
index d361d654..9c6e8ea7 100644
--- a/drivers/net/sfc/base/medford_nic.c
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -40,12 +40,11 @@ efx_mcdi_get_rxdp_config(
__out uint32_t *end_paddingp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
- MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
uint32_t end_padding;
efx_rc_t rc;
- memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
index 29bbff8a..427fcde6 100644
--- a/drivers/net/sfc/base/siena_mac.c
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -92,14 +92,13 @@ siena_mac_reconfigure(
efx_port_t *epp = &(enp->en_port);
efx_oword_t multicast_hash[2];
efx_mcdi_req_t req;
- uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
- MC_CMD_SET_MAC_OUT_LEN),
- MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
- MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MAX(MC_CMD_SET_MAC_IN_LEN, MC_CMD_SET_MCAST_HASH_IN_LEN),
+ MAX(MC_CMD_SET_MAC_OUT_LEN, MC_CMD_SET_MCAST_HASH_OUT_LEN));
+
unsigned int fcntl;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_MAC;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
index fcc8f151..09b67a77 100644
--- a/drivers/net/sfc/base/siena_nic.c
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -42,11 +42,10 @@ siena_nic_get_partn_mask(
__out unsigned int *maskp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
- MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_NVRAM_TYPES;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
index af4cf172..7556df16 100644
--- a/drivers/net/sfc/base/siena_nvram.c
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -433,12 +433,11 @@ siena_nvram_get_subtype(
__out uint32_t *subtypep)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
- MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
efx_word_t *fw_list;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_BOARD_CFG;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
index b90ccabc..7addad7d 100644
--- a/drivers/net/sfc/base/siena_phy.c
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -193,11 +193,10 @@ siena_phy_get_link(
__out siena_link_state_t *slsp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
- MC_CMD_GET_LINK_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
@@ -268,16 +267,14 @@ siena_phy_reconfigure(
{
efx_port_t *epp = &(enp->en_port);
efx_mcdi_req_t req;
- uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
- MC_CMD_SET_ID_LED_OUT_LEN),
- MAX(MC_CMD_SET_LINK_IN_LEN,
- MC_CMD_SET_LINK_OUT_LEN))];
+ EFX_MCDI_DECLARE_BUF(payload,
+ MAX(MC_CMD_SET_ID_LED_IN_LEN, MC_CMD_SET_LINK_IN_LEN),
+ MAX(MC_CMD_SET_ID_LED_OUT_LEN, MC_CMD_SET_LINK_OUT_LEN));
uint32_t cap_mask;
unsigned int led_mode;
unsigned int speed;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_SET_LINK;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
@@ -383,12 +380,11 @@ siena_phy_verify(
__in efx_nic_t *enp)
{
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
- MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN);
uint32_t state;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_GET_PHY_STATE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
@@ -552,11 +548,10 @@ siena_phy_stats_update(
uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
uint64_t smask;
efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
- MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN);
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_PHY_STATS;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
@@ -641,14 +636,13 @@ siena_phy_bist_poll(
__in size_t count)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
- MCDI_CTL_SDU_LEN_MAX)];
+ EFX_MCDI_DECLARE_BUF(payload, MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX);
uint32_t value_mask = 0;
efx_mcdi_req_t req;
uint32_t result;
efx_rc_t rc;
- (void) memset(payload, 0, sizeof (payload));
req.emr_cmd = MC_CMD_POLL_BIST;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
index bd2cbd09..7069c534 100644
--- a/drivers/net/sfc/sfc_ef10_rx.c
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -489,11 +489,11 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t n_rx_pkts;
efx_qword_t rx_ev;
+ n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+
if (unlikely(rxq->flags &
(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
- return 0;
-
- n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+ goto done;
evq_old_read_ptr = rxq->evq_read_ptr;
while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
@@ -513,6 +513,7 @@ sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
/* It is not a problem if we refill in the case of exception */
sfc_ef10_rx_qrefill(rxq);
+done:
return n_rx_pkts;
}
@@ -646,8 +647,9 @@ sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
{
struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
- rxq->prepared = 0;
- rxq->completed = rxq->added = 0;
+ SFC_ASSERT(rxq->prepared == 0);
+ SFC_ASSERT(rxq->completed == 0);
+ SFC_ASSERT(rxq->added == 0);
sfc_ef10_rx_qrefill(rxq);
@@ -694,12 +696,16 @@ sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
unsigned int i;
struct sfc_ef10_rx_sw_desc *rxd;
+ rxq->prepared = 0;
+
for (i = rxq->completed; i != rxq->added; ++i) {
rxd = &rxq->sw_ring[i & rxq->ptr_mask];
rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
rxd->mbuf = NULL;
}
+ rxq->completed = rxq->added = 0;
+
rxq->flags &= ~SFC_EF10_RXQ_STARTED;
}
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 1b160b5e..005df401 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -35,6 +35,7 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>
+#include <rte_string_fns.h>
#include "efx.h"
@@ -458,8 +459,6 @@ sfc_rx_queue_release(void *queue)
sfc_log_init(sa, "RxQ=%u", sw_index);
- sa->eth_dev->data->rx_queues[sw_index] = NULL;
-
sfc_rx_qfini(sa, sw_index);
sfc_adapter_unlock(sa);
@@ -514,9 +513,6 @@ sfc_tx_queue_release(void *queue)
sfc_adapter_lock(sa);
- SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
- sa->eth_dev->data->tx_queues[sw_index] = NULL;
-
sfc_tx_qfini(sa, sw_index);
sfc_adapter_unlock(sa);
@@ -666,7 +662,7 @@ sfc_xstats_get_names(struct rte_eth_dev *dev,
for (i = 0; i < EFX_MAC_NSTATS; ++i) {
if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
if (xstats_names != NULL && nstats < xstats_count)
- strncpy(xstats_names[nstats].name,
+ strlcpy(xstats_names[nstats].name,
efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
nstats++;
@@ -744,9 +740,8 @@ sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
char *name = xstats_names[nb_written++].name;
- strncpy(name, efx_mac_stat_name(sa->nic, i),
+ strlcpy(name, efx_mac_stat_name(sa->nic, i),
sizeof(xstats_names[0].name));
- name[sizeof(xstats_names[0].name) - 1] = '\0';
}
++nb_supported;
@@ -1230,14 +1225,10 @@ sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
struct sfc_adapter *sa = dev->data->dev_private;
- struct sfc_port *port = &sa->port;
- if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
return -ENOTSUP;
- if (sa->rss_channels == 0)
- return -EINVAL;
-
sfc_adapter_lock(sa);
/*
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index 8fc93d68..238aa5ea 100644
--- a/drivers/net/sfc/sfc_rx.c
+++ b/drivers/net/sfc/sfc_rx.c
@@ -1028,6 +1028,7 @@ sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
struct sfc_rxq *rxq;
SFC_ASSERT(sw_index < sa->rxq_count);
+ sa->eth_dev->data->rx_queues[sw_index] = NULL;
rxq_info = &sa->rxq_info[sw_index];
@@ -1340,7 +1341,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
goto fail_check_mode;
if (nb_rx_queues == sa->rxq_count)
- goto done;
+ goto configure_rss;
if (sa->rxq_info == NULL) {
rc = ENOMEM;
@@ -1377,6 +1378,7 @@ sfc_rx_configure(struct sfc_adapter *sa)
sa->rxq_count++;
}
+configure_rss:
#if EFSYS_OPT_RX_SCALE
sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
MIN(sa->rxq_count, EFX_MAXRSS) : 0;
@@ -1395,7 +1397,6 @@ sfc_rx_configure(struct sfc_adapter *sa)
}
#endif
-done:
return 0;
fail_rx_process_adv_conf_rss:
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index d1320f46..9b1e6541 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -239,6 +239,8 @@ sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
sfc_log_init(sa, "TxQ = %u", sw_index);
SFC_ASSERT(sw_index < sa->txq_count);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
txq_info = &sa->txq_info[sw_index];
txq = txq_info->txq;
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 3c695e4d..62468616 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -79,28 +79,13 @@ static const char *pmd_valid_args[] = {
NULL
};
-static const struct rte_eth_dev_info pmd_dev_info = {
- .min_rx_bufsize = 0,
- .max_rx_pktlen = UINT32_MAX,
- .max_rx_queues = UINT16_MAX,
- .max_tx_queues = UINT16_MAX,
- .rx_desc_lim = {
- .nb_max = UINT16_MAX,
- .nb_min = 0,
- .nb_align = 1,
- },
- .tx_desc_lim = {
- .nb_max = UINT16_MAX,
- .nb_min = 0,
- .nb_align = 1,
- },
-};
-
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
- memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
+ dev_info->max_rx_pktlen = UINT32_MAX;
+ dev_info->max_rx_queues = UINT16_MAX;
+ dev_info->max_tx_queues = UINT16_MAX;
}
static int
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
index c38c02ef..466624ae 100644
--- a/drivers/net/tap/rte_eth_tap.c
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -222,7 +222,7 @@ tun_alloc(struct pmd_internals *pmd)
return fd;
error:
- if (fd > 0)
+ if (fd >= 0)
close(fd);
return -1;
}
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index 06cbc463..7e8688b4 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -89,6 +89,14 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
entry->buff[0] = sqe.buff[0];
}
+static inline void __hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+ struct rte_mbuf *pkt)
+{
+ fill_sq_desc_header(entry, pkt);
+ entry->buff[1] = 0ULL;
+}
+
void __hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
@@ -232,7 +240,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
used_bufs += nb_segs;
txbuffs[tail] = NULL;
- fill_sq_desc_header(desc_ptr + tail, pkt);
+ fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
tail = (tail + 1) & qlen_mask;
txbuffs[tail] = pkt;
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 6091b322..0ba25b53 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -1293,4 +1293,7 @@ RTE_PMD_REGISTER_VDEV(net_vhost, pmd_vhost_drv);
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
- "queues=<int>");
+ "queues=<int> "
+ "client=<0|1> "
+ "dequeue-zero-copy=<0|1> "
+ "iommu-support=<0|1>");
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 4da1ba32..6eece5bc 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1622,11 +1622,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (ret < 0)
goto out;
- /* Setup interrupt callback */
- if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_register(eth_dev->intr_handle,
- virtio_interrupt_handler, eth_dev);
-
return 0;
out:
@@ -1652,11 +1647,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
- /* reset interrupt callback */
- if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_unregister(eth_dev->intr_handle,
- virtio_interrupt_handler,
- eth_dev);
if (eth_dev->device)
rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
@@ -1833,6 +1823,12 @@ virtio_dev_start(struct rte_eth_dev *dev)
dev->data->dev_conf.intr_conf.rxq) {
virtio_intr_disable(dev);
+ /* Setup interrupt callback */
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+
if (virtio_intr_enable(dev) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
@@ -1947,9 +1943,17 @@ virtio_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "stop");
- if (intr_conf->lsc || intr_conf->rxq)
+ if (intr_conf->lsc || intr_conf->rxq) {
virtio_intr_disable(dev);
+ /* Reset interrupt callback */
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ }
+ }
+
hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 9574498f..249ec6d3 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -596,16 +596,18 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
}
ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
- if (ret < 0) {
- PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ if (ret != 1) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci capability list, ret %d", ret);
return -1;
}
while (pos) {
- ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
- if (ret < 0) {
- PMD_INIT_LOG(ERR,
- "failed to read pci cap at pos: %x", pos);
+ ret = rte_pci_read_config(dev, &cap, 2, pos);
+ if (ret != 2) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos: %x ret %d",
+ pos, ret);
break;
}
@@ -615,7 +617,16 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
* 1st byte is cap ID; 2nd byte is the position of next
* cap; next two bytes are the flags.
*/
- uint16_t flags = ((uint16_t *)&cap)[1];
+ uint16_t flags;
+
+ ret = rte_pci_read_config(dev, &flags, sizeof(flags),
+ pos + 2);
+ if (ret != sizeof(flags)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos:"
+ " %x ret %d", pos + 2, ret);
+ break;
+ }
if (flags & PCI_MSIX_ENABLE)
hw->use_msix = VIRTIO_MSIX_ENABLED;
@@ -630,6 +641,14 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
goto next;
}
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret != sizeof(cap)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos: %x ret %d",
+ pos, ret);
+ break;
+ }
+
PMD_INIT_LOG(DEBUG,
"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
@@ -639,9 +658,15 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
hw->common_cfg = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
- rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ ret = rte_pci_read_config(dev,
+ &hw->notify_off_multiplier,
4, pos + sizeof(cap));
- hw->notify_base = get_cfg_addr(dev, &cap);
+ if (ret != 4)
+ PMD_INIT_LOG(DEBUG,
+ "failed to read notify_off_multiplier, ret %d",
+ ret);
+ else
+ hw->notify_base = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
hw->dev_cfg = get_cfg_addr(dev, &cap);
@@ -718,25 +743,37 @@ enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
uint8_t pos;
- struct virtio_pci_cap cap;
int ret;
ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
- if (ret < 0) {
- PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+ if (ret != 1) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci capability list, ret %d", ret);
return VIRTIO_MSIX_NONE;
}
while (pos) {
- ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
- if (ret < 0) {
- PMD_INIT_LOG(ERR,
- "failed to read pci cap at pos: %x", pos);
+ uint8_t cap[2];
+
+ ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
+ if (ret != sizeof(cap)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos: %x ret %d",
+ pos, ret);
break;
}
- if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
- uint16_t flags = ((uint16_t *)&cap)[1];
+ if (cap[0] == PCI_CAP_ID_MSIX) {
+ uint16_t flags;
+
+ ret = rte_pci_read_config(dev, &flags, sizeof(flags),
+ pos + sizeof(cap));
+ if (ret != sizeof(flags)) {
+ PMD_INIT_LOG(DEBUG,
+ "failed to read pci cap at pos:"
+ " %x ret %d", pos + 2, ret);
+ break;
+ }
if (flags & PCI_MSIX_ENABLE)
return VIRTIO_MSIX_ENABLED;
@@ -744,7 +781,7 @@ vtpci_msix_detect(struct rte_pci_device *dev)
return VIRTIO_MSIX_DISABLED;
}
- pos = cap.cap_next;
+ pos = cap[1];
}
return VIRTIO_MSIX_NONE;
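
The virtio_pci.c hunks above tighten every rte_pci_read_config() call: the return value is compared against the exact length requested, and the MSI-X flags are fetched with a dedicated two-byte read at pos + 2 instead of aliasing the capability struct through a uint16_t pointer. A compressed sketch of the resulting capability walk; read_config() is a hypothetical stand-in for rte_pci_read_config() backed by an in-memory config space so the example builds on its own, and the PCI constants are restated locally:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Standard PCI constants, restated locally for the sketch. */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_MSIX		0x11
#define PCI_MSIX_ENABLE		0x8000

/* Hypothetical stand-in for rte_pci_read_config(): returns bytes read. */
static int
read_config(const uint8_t *cfg, void *buf, size_t len, unsigned int off)
{
	memcpy(buf, cfg + off, len);
	return (int)len;
}

/* Capability walk in the style of the patched vtpci_msix_detect():
 * every read is checked against the length that was requested. */
static int
msix_enabled(const uint8_t *cfg)
{
	uint8_t pos;
	uint8_t cap[2];		/* cap[0] = capability ID, cap[1] = next pointer */
	uint16_t flags;

	if (read_config(cfg, &pos, 1, PCI_CAPABILITY_LIST) != 1)
		return -1;

	while (pos) {
		if (read_config(cfg, cap, sizeof(cap), pos) != (int)sizeof(cap))
			return -1;
		if (cap[0] == PCI_CAP_ID_MSIX) {
			/* Message control lives two bytes past the ID/next pair. */
			if (read_config(cfg, &flags, sizeof(flags),
			    pos + sizeof(cap)) != (int)sizeof(flags))
				return -1;
			return !!(flags & PCI_MSIX_ENABLE);
		}
		pos = cap[1];
	}
	return 0;	/* no MSI-X capability found */
}

int main(void)
{
	uint8_t cfg[256] = {0};

	cfg[PCI_CAPABILITY_LIST] = 0x40;	/* first capability at offset 0x40 */
	cfg[0x40] = PCI_CAP_ID_MSIX;
	cfg[0x41] = 0x00;			/* end of the capability list */
	cfg[0x43] = 0x80;			/* MSI-X enable bit; little-endian host assumed */

	printf("MSI-X enabled: %d\n", msix_enabled(cfg));
	return 0;
}
```

Run against the synthetic config space this prints "MSI-X enabled: 1"; any short read stops the walk instead of acting on uninitialised bytes.
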
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
index 68d28b13..35020012 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -189,8 +189,8 @@ prepare_vhost_memory_kernel(void)
(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
(1ULL << VIRTIO_NET_F_CSUM))
-static int
-tap_supporte_mq(void)
+static unsigned int
+tap_support_features(void)
{
int tapfd;
unsigned int tap_features;
@@ -209,7 +209,7 @@ tap_supporte_mq(void)
}
close(tapfd);
- return tap_features & IFF_MULTI_QUEUE;
+ return tap_features;
}
static int
@@ -223,6 +223,7 @@ vhost_kernel_ioctl(struct virtio_user_dev *dev,
struct vhost_memory_kernel *vm = NULL;
int vhostfd;
unsigned int queue_sel;
+ unsigned int features;
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
@@ -276,17 +277,20 @@ vhost_kernel_ioctl(struct virtio_user_dev *dev,
}
if (!ret && req_kernel == VHOST_GET_FEATURES) {
+ features = tap_support_features();
/* with tap as the backend, all these features are supported
* but not claimed by vhost-net, so we add them back when
* reporting to upper layer.
*/
- *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
- *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+ if (features & IFF_VNET_HDR) {
+ *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+ }
/* vhost_kernel will not declare this feature, but it does
* support multi-queue.
*/
- if (tap_supporte_mq())
+ if (features & IFF_MULTI_QUEUE)
*(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
}
@@ -380,7 +384,8 @@ vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
else
hdr_size = sizeof(struct virtio_net_hdr);
- tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq);
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
+ (char *)dev->mac_addr, dev->features);
if (tapfd < 0) {
PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
return -1;
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
index 689a5cff..e9ee7740 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -36,26 +36,64 @@
#include <sys/stat.h>
#include <fcntl.h>
#include <net/if.h>
+#include <net/if_arp.h>
#include <errno.h>
#include <string.h>
#include <limits.h>
+#include <rte_ether.h>
+
#include "vhost_kernel_tap.h"
#include "../virtio_logs.h"
+#include "../virtio_pci.h"
+
+static int
+vhost_kernel_tap_set_offload(int fd, uint64_t features)
+{
+ unsigned int offload = 0;
+
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+ offload |= TUN_F_CSUM;
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_TSO4))
+ offload |= TUN_F_TSO4;
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_TSO6))
+ offload |= TUN_F_TSO6;
+ if (features & ((1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6)) &&
+ (features & (1ULL << VIRTIO_NET_F_GUEST_ECN)))
+ offload |= TUN_F_TSO_ECN;
+ if (features & (1ULL << VIRTIO_NET_F_GUEST_UFO))
+ offload |= TUN_F_UFO;
+ }
+
+ if (offload != 0) {
+ /* Check if our kernel supports TUNSETOFFLOAD */
+ if (ioctl(fd, TUNSETOFFLOAD, 0) != 0 && errno == EINVAL) {
+ PMD_DRV_LOG(ERR, "Kernel doesn't support TUNSETOFFLOAD\n");
+ return -ENOTSUP;
+ }
+
+ if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
+ offload &= ~TUN_F_UFO;
+ if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) {
+ PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
int
-vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac, uint64_t features)
{
unsigned int tap_features;
int sndbuf = INT_MAX;
struct ifreq ifr;
int tapfd;
- unsigned int offload =
- TUN_F_CSUM |
- TUN_F_TSO4 |
- TUN_F_TSO6 |
- TUN_F_TSO_ECN |
- TUN_F_UFO;
/* TODO:
* 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len
@@ -115,13 +153,15 @@ vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
goto error;
}
- /* TODO: before set the offload capabilities, we'd better (1) check
- * negotiated features to see if necessary to offload; (2) query tap
- * to see if it supports the offload capabilities.
- */
- if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0)
- PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
- strerror(errno));
+ vhost_kernel_tap_set_offload(tapfd, features);
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
+ memcpy(ifr.ifr_hwaddr.sa_data, mac, ETHER_ADDR_LEN);
+ if (ioctl(tapfd, SIOCSIFHWADDR, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "SIOCSIFHWADDR failed: %s", strerror(errno));
+ goto error;
+ }
if (!(*p_ifname))
*p_ifname = strdup(ifr.ifr_name);
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
index eae340cc..ea7a6c93 100644
--- a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -64,4 +64,5 @@
/* Constants */
#define PATH_NET_TUN "/dev/net/tun"
-int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq);
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq,
+ const char *mac, uint64_t features);
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 7ce512c7..b13e77f5 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -166,17 +166,27 @@ error:
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
+ struct vhost_vring_state state;
uint32_t i;
+ int error = 0;
for (i = 0; i < dev->max_queue_pairs; ++i)
dev->ops->enable_qp(dev, i, 0);
- if (dev->ops->send_request(dev, VHOST_USER_RESET_OWNER, NULL) < 0) {
- PMD_DRV_LOG(INFO, "Failed to reset the device\n");
- return -1;
+ /* Stop the backend. */
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ state.index = i;
+ if (dev->ops->send_request(dev, VHOST_USER_GET_VRING_BASE,
+ &state) < 0) {
+ PMD_DRV_LOG(ERR, "get_vring_base failed, index=%u\n",
+ i);
+ error = -1;
+ goto out;
+ }
}
- return 0;
+out:
+ return error;
}
static inline void
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 7be57ce6..15d459d3 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -423,7 +423,7 @@ virtio_user_pmd_probe(struct rte_vdev_device *dev)
}
} else {
PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
- VIRTIO_USER_ARG_QUEUE_SIZE);
+ VIRTIO_USER_ARG_PATH);
goto end;
}
diff --git a/examples/flow_filtering/flow_blocks.c b/examples/flow_filtering/flow_blocks.c
index f92df102..b71a42ff 100644
--- a/examples/flow_filtering/flow_blocks.c
+++ b/examples/flow_filtering/flow_blocks.c
@@ -74,8 +74,6 @@ generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
struct rte_flow_action_queue queue = { .index = rx_q };
struct rte_flow_item_eth eth_spec;
struct rte_flow_item_eth eth_mask;
- struct rte_flow_item_vlan vlan_spec;
- struct rte_flow_item_vlan vlan_mask;
struct rte_flow_item_ipv4 ip_spec;
struct rte_flow_item_ipv4 ip_mask;
int res;
@@ -113,17 +111,6 @@ generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
pattern[0].mask = &eth_mask;
/*
- * setting the second level of the pattern (vlan).
- * since in this example we just want to get the
- * ipv4 we also set this level to allow all.
- */
- memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
- memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
- pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
- pattern[1].spec = &vlan_spec;
- pattern[1].mask = &vlan_mask;
-
- /*
* setting the third level of the pattern (ip).
* in this example this is the level we care about
* so we set it according to the parameters.
@@ -134,12 +121,12 @@ generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
ip_mask.hdr.dst_addr = dest_mask;
ip_spec.hdr.src_addr = htonl(src_ip);
ip_mask.hdr.src_addr = src_mask;
- pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
- pattern[2].spec = &ip_spec;
- pattern[2].mask = &ip_mask;
+ pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ pattern[1].spec = &ip_spec;
+ pattern[1].mask = &ip_mask;
/* the final level must be always type end */
- pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+ pattern[2].type = RTE_FLOW_ITEM_TYPE_END;
res = rte_flow_validate(port_id, &attr, pattern, action, error);
if (!res)
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 1c585165..6cfa6ebb 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -298,8 +298,6 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
hdr->tx_offload = pkt->tx_offload;
hdr->hash = pkt->hash;
- hdr->ol_flags = pkt->ol_flags;
-
__rte_mbuf_sanity_check(hdr, 1);
return hdr;
}
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 1f532fe3..82660a67 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -87,9 +87,6 @@
/* Max number of devices. Limited by vmdq. */
#define MAX_DEVICES 64
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
-
/* Maximum long option length for option parsing. */
#define MAX_LONG_OPT_SZ 64
diff --git a/lib/librte_acl/rte_acl.h b/lib/librte_acl/rte_acl.h
index b53179a8..3a3ebcde 100644
--- a/lib/librte_acl/rte_acl.h
+++ b/lib/librte_acl/rte_acl.h
@@ -117,7 +117,7 @@ enum {
RTE_ACL_TYPE_SHIFT = 29,
RTE_ACL_MAX_INDEX = RTE_LEN2MASK(RTE_ACL_TYPE_SHIFT, uint32_t),
RTE_ACL_MAX_PRIORITY = RTE_ACL_MAX_INDEX,
- RTE_ACL_MIN_PRIORITY = 0,
+ RTE_ACL_MIN_PRIORITY = 1,
};
#define RTE_ACL_MASKLEN_TO_BITMASK(v, s) \
diff --git a/lib/librte_eal/common/arch/x86/rte_memcpy.c b/lib/librte_eal/common/arch/x86/rte_memcpy.c
deleted file mode 100644
index 174bef15..00000000
--- a/lib/librte_eal/common/arch/x86/rte_memcpy.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_memcpy.h>
-#include <rte_cpuflags.h>
-#include <rte_log.h>
-
-void *(*rte_memcpy_ptr)(void *dst, const void *src, size_t n) = NULL;
-
-RTE_INIT(rte_memcpy_init)
-{
-#ifdef CC_SUPPORT_AVX512F
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
- rte_memcpy_ptr = rte_memcpy_avx512f;
- RTE_LOG(DEBUG, EAL, "AVX512 memcpy is using!\n");
- return;
- }
-#endif
-#ifdef CC_SUPPORT_AVX2
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) {
- rte_memcpy_ptr = rte_memcpy_avx2;
- RTE_LOG(DEBUG, EAL, "AVX2 memcpy is using!\n");
- return;
- }
-#endif
- rte_memcpy_ptr = rte_memcpy_sse;
- RTE_LOG(DEBUG, EAL, "Default SSE/AVX memcpy is using!\n");
-}
diff --git a/lib/librte_eal/common/eal_common_options.c b/lib/librte_eal/common/eal_common_options.c
index 996a0342..c7eb056b 100644
--- a/lib/librte_eal/common/eal_common_options.c
+++ b/lib/librte_eal/common/eal_common_options.c
@@ -232,7 +232,7 @@ eal_plugin_add(const char *path)
return -1;
}
memset(solib, 0, sizeof(*solib));
- strncpy(solib->name, path, PATH_MAX-1);
+ strlcpy(solib->name, path, PATH_MAX-1);
solib->name[PATH_MAX-1] = 0;
TAILQ_INSERT_TAIL(&solib_list, solib, next);
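
This hunk, like the sfc xstats changes earlier in the series, swaps strncpy() for strlcpy(): the copy is always NUL-terminated, so the manual name[size - 1] = '\0' fix-up can go. A small sketch of the difference; my_strlcpy() is a local fallback with the usual strlcpy() semantics, used here only so the example builds on libcs that do not ship one (DPDK itself gets strlcpy() from rte_string_fns.h, included in the sfc hunk above):

```c
#include <stdio.h>
#include <string.h>

/* Local fallback with the usual strlcpy() semantics: copy at most size - 1
 * bytes, always NUL-terminate, return the length of the source string. */
static size_t
my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t srclen = strlen(src);

	if (size > 0) {
		size_t n = srclen < size - 1 ? srclen : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return srclen;
}

int main(void)
{
	char a[8], b[8];

	/* strncpy() does not terminate when the source does not fit ... */
	strncpy(a, "a-long-stat-name", sizeof(a));
	a[sizeof(a) - 1] = '\0';	/* ... so a manual fix-up is still needed */

	/* strlcpy() truncates and terminates in one step. */
	my_strlcpy(b, "a-long-stat-name", sizeof(b));

	printf("a=\"%s\" b=\"%s\"\n", a, b);
	return 0;
}
```

With an 8-byte destination both copies come out truncated to "a-long-", but only the strncpy() path needs the extra assignment before it is safe to print.
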
diff --git a/lib/librte_eal/common/include/arch/arm/rte_vect.h b/lib/librte_eal/common/include/arch/arm/rte_vect.h
index aa887a97..e5c1d358 100644
--- a/lib/librte_eal/common/include/arch/arm/rte_vect.h
+++ b/lib/librte_eal/common/include/arch/arm/rte_vect.h
@@ -106,6 +106,12 @@ vcopyq_laneq_u32(uint32x4_t a, const int lane_a,
typedef uint64_t poly64_t;
typedef uint64x2_t poly64x2_t;
typedef uint8_t poly128_t __attribute__((vector_size(16), aligned(16)));
+
+static inline uint32x4_t
+vceqzq_u32(uint32x4_t a)
+{
+ return (a == 0);
+}
#endif
/* NEON intrinsic vreinterpretq_u64_p128() is supported since GCC version 7 */
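
The vceqzq_u32() fallback added above leans on the compiler's generic vector extension: comparing a vector against zero yields a lane mask with all bits set in the lanes that matched. A small host-buildable sketch of the same mechanism, using a generic 4 x uint32_t vector type in place of NEON's uint32x4_t so it compiles on non-ARM hosts too:

```c
#include <stdint.h>
#include <stdio.h>

/* Generic GCC/Clang vector type standing in for NEON's uint32x4_t. */
typedef uint32_t u32x4 __attribute__((vector_size(16)));

/* Same shape as the vceqzq_u32() fallback: a lane-wise compare against zero,
 * where matching lanes come back as all-ones (0xffffffff). */
static u32x4
vceqz_sketch(u32x4 a)
{
	const u32x4 zero = {0, 0, 0, 0};

	return (u32x4)(a == zero);
}

int main(void)
{
	u32x4 v = {0, 7, 0, 42};
	u32x4 r = vceqz_sketch(v);

	printf("0x%08x 0x%08x 0x%08x 0x%08x\n", r[0], r[1], r[2], r[3]);
	return 0;
}
```

Lanes 0 and 2 hold zero, so the printed mask is 0xffffffff 0x00000000 0xffffffff 0x00000000, matching what the NEON intrinsic produces on compilers that provide it.
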
diff --git a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
index 596d7779..3f4a9db8 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_memcpy.h
@@ -603,7 +603,7 @@ rte_mov256(uint8_t *dst, const uint8_t *src)
*/
#define MOVEUNALIGNED_LEFT47_IMM(dst, src, len, offset) \
__extension__ ({ \
- int tmp; \
+ size_t tmp; \
while (len >= 128 + 16 - offset) { \
xmm0 = _mm_loadu_si128((const __m128i *)((const uint8_t *)src - offset + 0 * 16)); \
len -= 128; \
diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
index 5675c2b4..10cd7a30 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -105,10 +105,12 @@ static inline int rte_tm_supported(void)
static inline int
rte_try_tm(volatile int *lock)
{
+ int retries;
+
if (!rte_rtm_supported)
return 0;
- int retries = RTE_RTM_MAX_RETRIES;
+ retries = RTE_RTM_MAX_RETRIES;
while (likely(retries--)) {
diff --git a/lib/librte_eal/common/include/generic/rte_byteorder.h b/lib/librte_eal/common/include/generic/rte_byteorder.h
index 29e70c96..3d7ba5ec 100644
--- a/lib/librte_eal/common/include/generic/rte_byteorder.h
+++ b/lib/librte_eal/common/include/generic/rte_byteorder.h
@@ -152,7 +152,7 @@ typedef uint64_t rte_le64_t; /**< 64-bit little-endian value. */
static inline uint16_t
rte_constant_bswap16(uint16_t x)
{
- return RTE_STATIC_BSWAP16(x);
+ return (uint16_t)RTE_STATIC_BSWAP16(x);
}
/*
@@ -164,7 +164,7 @@ rte_constant_bswap16(uint16_t x)
static inline uint32_t
rte_constant_bswap32(uint32_t x)
{
- return RTE_STATIC_BSWAP32(x);
+ return (uint32_t)RTE_STATIC_BSWAP32(x);
}
/*
@@ -176,7 +176,7 @@ rte_constant_bswap32(uint32_t x)
static inline uint64_t
rte_constant_bswap64(uint64_t x)
{
- return RTE_STATIC_BSWAP64(x);
+ return (uint64_t)RTE_STATIC_BSWAP64(x);
}
diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h
index fdb3113d..f4ee1aa9 100644
--- a/lib/librte_eal/common/include/generic/rte_rwlock.h
+++ b/lib/librte_eal/common/include/generic/rte_rwlock.h
@@ -100,7 +100,7 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl)
continue;
}
success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
- x, x + 1);
+ (uint32_t)x, (uint32_t)(x + 1));
}
}
@@ -136,7 +136,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl)
continue;
}
success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
- 0, -1);
+ 0, (uint32_t)-1);
}
}
diff --git a/lib/librte_eal/common/include/rte_bitmap.h b/lib/librte_eal/common/include/rte_bitmap.h
index 13bfd9cb..eb270b27 100644
--- a/lib/librte_eal/common/include/rte_bitmap.h
+++ b/lib/librte_eal/common/include/rte_bitmap.h
@@ -117,7 +117,7 @@ __rte_bitmap_index1_inc(struct rte_bitmap *bmp)
static inline uint64_t
__rte_bitmap_mask1_get(struct rte_bitmap *bmp)
{
- return (~1lu) << bmp->offset1;
+ return (~1llu) << bmp->offset1;
}
static inline void
@@ -346,7 +346,7 @@ rte_bitmap_get(struct rte_bitmap *bmp, uint32_t pos)
index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
slab2 = bmp->array2 + index2;
- return (*slab2) & (1lu << offset2);
+ return (*slab2) & (1llu << offset2);
}
/**
@@ -371,8 +371,8 @@ rte_bitmap_set(struct rte_bitmap *bmp, uint32_t pos)
slab2 = bmp->array2 + index2;
slab1 = bmp->array1 + index1;
- *slab2 |= 1lu << offset2;
- *slab1 |= 1lu << offset1;
+ *slab2 |= 1llu << offset2;
+ *slab1 |= 1llu << offset1;
}
/**
@@ -399,7 +399,7 @@ rte_bitmap_set_slab(struct rte_bitmap *bmp, uint32_t pos, uint64_t slab)
slab1 = bmp->array1 + index1;
*slab2 |= slab;
- *slab1 |= 1lu << offset1;
+ *slab1 |= 1llu << offset1;
}
static inline uint64_t
@@ -437,7 +437,7 @@ rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos)
slab2 = bmp->array2 + index2;
/* Return if array2 slab is not all-zeros */
- *slab2 &= ~(1lu << offset2);
+ *slab2 &= ~(1llu << offset2);
if (*slab2){
return;
}
@@ -453,7 +453,7 @@ rte_bitmap_clear(struct rte_bitmap *bmp, uint32_t pos)
index1 = pos >> (RTE_BITMAP_SLAB_BIT_SIZE_LOG2 + RTE_BITMAP_CL_BIT_SIZE_LOG2);
offset1 = (pos >> RTE_BITMAP_CL_BIT_SIZE_LOG2) & RTE_BITMAP_SLAB_BIT_MASK;
slab1 = bmp->array1 + index1;
- *slab1 &= ~(1lu << offset1);
+ *slab1 &= ~(1llu << offset1);
return;
}
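The 1lu to 1llu changes above matter on 32-bit builds, where unsigned long is only 32 bits wide: a slab bit offset of 32 or more would shift the set bit out of range (undefined behaviour) instead of producing the intended 64-bit mask. A small stand-alone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int offset = 40; /* a bit index in the upper half of a slab */
	uint64_t mask = 1llu << offset; /* well defined: 1llu is at least 64 bits */

	printf("sizeof(1lu)=%zu sizeof(1llu)=%zu mask=0x%016llx\n",
	       sizeof(1lu), sizeof(1llu), (unsigned long long)mask);
	/* on an ILP32 target sizeof(1lu) is 4, so "1lu << 40" would be undefined */
	return 0;
}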
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
index f1d24b86..4485d634 100644
--- a/lib/librte_eal/common/include/rte_common.h
+++ b/lib/librte_eal/common/include/rte_common.h
@@ -241,16 +241,7 @@ rte_is_aligned(void *ptr, unsigned align)
/**
* Triggers an error at compilation time if the condition is true.
*/
-#ifndef __OPTIMIZE__
#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#else
-extern int RTE_BUILD_BUG_ON_detected_error;
-#define RTE_BUILD_BUG_ON(condition) do { \
- ((void)sizeof(char[1 - 2*!!(condition)])); \
- if (condition) \
- RTE_BUILD_BUG_ON_detected_error = 1; \
-} while(0)
-#endif
/*********** Macros to work with powers of 2 ********/
@@ -349,7 +340,7 @@ rte_align64pow2(uint64_t v)
static inline uint32_t
rte_bsf32(uint32_t v)
{
- return __builtin_ctz(v);
+ return (uint32_t)__builtin_ctz(v);
}
/**
@@ -369,6 +360,25 @@ rte_log2_u32(uint32_t v)
return rte_bsf32(v);
}
+
+/**
+ * Return the last (most-significant) bit set.
+ *
+ * @note The last (most significant) bit is at position 32.
+ * @note rte_fls_u32(0) = 0, rte_fls_u32(1) = 1, rte_fls_u32(0x80000000) = 32
+ *
+ * @param x
+ * The input parameter.
+ * @return
+ * The last (most-significant) bit set, or 0 if the input is 0.
+ */
+static inline int
+rte_fls_u32(uint32_t x)
+{
+ return (x == 0) ? 0 : 32 - __builtin_clz(x);
+}
+
+
#ifndef offsetof
/** Return the offset of a field in a structure. */
#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER)
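rte_fls_u32() added above reports the 1-based position of the most-significant set bit, with 0 reserved for a zero input; for a power of two it equals rte_bsf32(v) + 1. A quick check using a local copy of the same one-liner so the sketch compiles on its own:

#include <stdio.h>
#include <stdint.h>

/* same body as the rte_fls_u32() added to rte_common.h above */
static int fls_u32(uint32_t x)
{
	return (x == 0) ? 0 : 32 - __builtin_clz(x);
}

int main(void)
{
	const uint32_t v[] = { 0x0, 0x1, 0x4000, 0x80000000u };
	unsigned int i;

	for (i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("fls(0x%x) = %d\n", v[i], fls_u32(v[i]));
	/* prints 0, 1, 15 and 32, matching the doc comment in the hunk */
	return 0;
}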
diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h
index 8088dcc5..bc6e5926 100644
--- a/lib/librte_eal/common/include/rte_dev.h
+++ b/lib/librte_eal/common/include/rte_dev.h
@@ -60,15 +60,18 @@ rte_pmd_debug_trace(const char *func_name, const char *fmt, ...)
va_start(ap, fmt);
- char buffer[vsnprintf(NULL, 0, fmt, ap) + 1];
+ {
+ char buffer[vsnprintf(NULL, 0, fmt, ap) + 1];
- va_end(ap);
+ va_end(ap);
- va_start(ap, fmt);
- vsnprintf(buffer, sizeof(buffer), fmt, ap);
- va_end(ap);
+ va_start(ap, fmt);
+ vsnprintf(buffer, sizeof(buffer), fmt, ap);
+ va_end(ap);
- rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s", func_name, buffer);
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s",
+ func_name, buffer);
+ }
}
/*
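The rte_pmd_debug_trace() fix above keeps the two-pass vsnprintf() idiom while moving the variable-length array into its own block: the first call with a NULL buffer only measures the formatted length, and the va_list must be restarted before the second call fills the buffer. A stand-alone sketch of the same idiom, using malloc() instead of a VLA:

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* format into a heap buffer sized by a measuring vsnprintf() pass */
static char *format_message(const char *fmt, ...)
{
	va_list ap;
	int len;
	char *buf;

	va_start(ap, fmt);
	len = vsnprintf(NULL, 0, fmt, ap); /* pass 1: measure only */
	va_end(ap);
	if (len < 0)
		return NULL;

	buf = malloc((size_t)len + 1);
	if (buf == NULL)
		return NULL;

	va_start(ap, fmt); /* the va_list has to be restarted for pass 2 */
	vsnprintf(buf, (size_t)len + 1, fmt, ap);
	va_end(ap);
	return buf;
}

int main(void)
{
	char *msg = format_message("port %d: %s", 3, "link up");

	puts(msg != NULL ? msg : "format failed");
	free(msg);
	return 0;
}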
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
index 3735da0c..0dd710de 100644
--- a/lib/librte_eal/common/include/rte_lcore.h
+++ b/lib/librte_eal/common/include/rte_lcore.h
@@ -136,7 +136,7 @@ rte_lcore_index(int lcore_id)
if (lcore_id >= RTE_MAX_LCORE)
return -1;
if (lcore_id < 0)
- lcore_id = rte_lcore_id();
+ lcore_id = (int)rte_lcore_id();
return lcore_config[lcore_id].core_index;
}
diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h
index aeff1f05..bfbfd13a 100644
--- a/lib/librte_eal/common/include/rte_random.h
+++ b/lib/librte_eal/common/include/rte_random.h
@@ -60,7 +60,7 @@ extern "C" {
static inline void
rte_srand(uint64_t seedval)
{
- srand48((long unsigned int)seedval);
+ srand48((long)seedval);
}
/**
@@ -77,9 +77,9 @@ static inline uint64_t
rte_rand(void)
{
uint64_t val;
- val = lrand48();
+ val = (uint64_t)lrand48();
val <<= 32;
- val += lrand48();
+ val += (uint64_t)lrand48();
return val;
}
diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h
index cfca2f8d..7d57bb8e 100644
--- a/lib/librte_eal/common/include/rte_string_fns.h
+++ b/lib/librte_eal/common/include/rte_string_fns.h
@@ -44,6 +44,8 @@
extern "C" {
#endif
+#include <stdio.h>
+
/**
* Takes string "string" parameter and splits it at character "delim"
* up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like
@@ -74,6 +76,35 @@ int
rte_strsplit(char *string, int stringlen,
char **tokens, int maxtokens, char delim);
+/**
+ * @internal
+ * DPDK-specific version of strlcpy for systems without
+ * libc or libbsd copies of the function
+ */
+static inline size_t
+rte_strlcpy(char *dst, const char *src, size_t size)
+{
+ return (size_t)snprintf(dst, size, "%s", src);
+}
+
+/* pull in a strlcpy function */
+#ifdef RTE_EXEC_ENV_BSDAPP
+#include <string.h>
+#ifndef __BSD_VISIBLE /* non-standard functions are hidden */
+#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
+#endif
+
+
+#else /* non-BSD platforms */
+#ifdef RTE_USE_LIBBSD
+#include <bsd/string.h>
+
+#else /* no BSD header files, create own */
+#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size)
+
+#endif /* RTE_USE_LIBBSD */
+#endif /* BSDAPP */
+
#ifdef __cplusplus
}
#endif
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index 4d0a9f7c..a44d3357 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -66,7 +66,7 @@ extern "C" {
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 4
+#define RTE_VER_MINOR 5
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
index 229eec9f..e6a73313 100644
--- a/lib/librte_eal/linuxapp/eal/eal.c
+++ b/lib/librte_eal/linuxapp/eal/eal.c
@@ -743,7 +743,8 @@ rte_eal_init(int argc, char **argv)
int i, fctret, ret;
pthread_t thread_id;
static rte_atomic32_t run_once = RTE_ATOMIC32_INIT(0);
- const char *logid;
+ const char *p;
+ static char logid[PATH_MAX];
char cpuset[RTE_CPU_AFFINITY_STR_LEN];
char thread_name[RTE_MAX_THREAD_NAME_LEN];
@@ -760,9 +761,8 @@ rte_eal_init(int argc, char **argv)
return -1;
}
- logid = strrchr(argv[0], '/');
- logid = strdup(logid ? logid + 1: argv[0]);
-
+ p = strrchr(argv[0], '/');
+ strlcpy(logid, p ? p + 1 : argv[0], sizeof(logid));
thread_id = pthread_self();
eal_reset_internal_config(&internal_config);
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
index e1179b85..c54b8823 100644
--- a/lib/librte_eal/linuxapp/eal/eal_interrupts.c
+++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
@@ -652,7 +652,7 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
bool call = false;
int n, bytes_read;
struct rte_intr_source *src;
- struct rte_intr_callback *cb;
+ struct rte_intr_callback *cb, *next;
union rte_intr_read_buffer buf;
struct rte_intr_callback active_cb;
@@ -723,6 +723,23 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds)
"descriptor %d: %s\n",
events[n].data.fd,
strerror(errno));
+ /*
+				 * The device is unplugged or buggy; remove
+ * it as an interrupt source and return to
+ * force the wait list to be rebuilt.
+ */
+ rte_spinlock_lock(&intr_lock);
+ TAILQ_REMOVE(&intr_sources, src, next);
+ rte_spinlock_unlock(&intr_lock);
+
+ for (cb = TAILQ_FIRST(&src->callbacks); cb;
+ cb = next) {
+ next = TAILQ_NEXT(cb, next);
+ TAILQ_REMOVE(&src->callbacks, cb, next);
+ free(cb);
+ }
+ free(src);
+ return -1;
} else if (bytes_read == 0)
RTE_LOG(ERR, EAL, "Read nothing from file "
"descriptor %d\n", events[n].data.fd);
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
index bac969a1..8d0456b5 100644
--- a/lib/librte_eal/linuxapp/eal/eal_memory.c
+++ b/lib/librte_eal/linuxapp/eal/eal_memory.c
@@ -396,7 +396,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
int node_id = -1;
int essential_prev = 0;
int oldpolicy;
- struct bitmask *oldmask = numa_allocate_nodemask();
+ struct bitmask *oldmask = NULL;
bool have_numa = true;
unsigned long maxnode = 0;
@@ -408,6 +408,7 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
if (orig && have_numa) {
RTE_LOG(DEBUG, EAL, "Trying to obtain current memory policy.\n");
+ oldmask = numa_allocate_nodemask();
if (get_mempolicy(&oldpolicy, oldmask->maskp,
oldmask->size + 1, 0, 0) < 0) {
RTE_LOG(ERR, EAL,
@@ -421,6 +422,21 @@ map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
}
#endif
+#ifdef RTE_ARCH_64
+ /*
+	 * Hugepages are first mmapped individually and then re-mmapped into
+	 * another region, so that physically contiguous pages also end up at
+	 * contiguous virtual addresses. Set vma_addr for the first hugepage
+	 * to a virtual address that will not collide with that second mapping
+	 * later; the following hugepages use increments of this initial
+	 * address.
+	 *
+	 * The final virtual address will be based on baseaddr, which is
+	 * 0x100000000. We use a hint here starting at 0x200000000, leaving
+	 * another 4GB just in case, plus the total available hugepage memory.
+ */
+ vma_addr = (char *)0x200000000 + (hpi->hugepage_sz * hpi->num_pages[0]);
+#endif
for (i = 0; i < hpi->num_pages[0]; i++) {
uint64_t hugepage_sz = hpi->hugepage_sz;
@@ -588,7 +604,8 @@ out:
numa_set_localalloc();
}
}
- numa_free_cpumask(oldmask);
+ if (oldmask != NULL)
+ numa_free_cpumask(oldmask);
#endif
return i;
}
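The 64-bit hint above places the first per-page mapping safely past the region later used for the contiguous re-mapping, which the comment says is based at 0x100000000. For instance, with 1024 pages of 2 MB the hint works out to 0x200000000 + 0x80000000 = 0x280000000. A small calculation of the hint for a couple of assumed hugepage configurations:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed configurations: hugepage size and reserved page count */
	const struct { uint64_t sz; uint64_t num; } cfg[] = {
		{ 2ULL << 20, 1024 }, /* 1024 x 2 MB */
		{ 1ULL << 30, 8 },    /* 8 x 1 GB    */
	};
	unsigned int i;

	for (i = 0; i < sizeof(cfg) / sizeof(cfg[0]); i++) {
		uint64_t hint = 0x200000000ULL + cfg[i].sz * cfg[i].num;

		printf("hugepage_sz=%llu num=%llu first mmap hint=0x%llx\n",
		       (unsigned long long)cfg[i].sz,
		       (unsigned long long)cfg[i].num,
		       (unsigned long long)hint);
	}
	return 0;
}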
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
index 794cd4f7..966fb9f4 100644
--- a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
+++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_kni_common.h
@@ -61,6 +61,7 @@
#ifdef __KERNEL__
#include <linux/if.h>
+#include <asm/barrier.h>
#define RTE_STD_C11
#else
#include <rte_common.h>
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
index 1826adba..45d70272 100644
--- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
+++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
@@ -45,8 +45,7 @@ struct rte_uio_pci_dev {
struct uio_info info;
struct pci_dev *pdev;
enum rte_intr_mode mode;
- struct mutex lock;
- int refcnt;
+ atomic_t refcnt;
};
static char *intr_mode;
@@ -338,23 +337,19 @@ igbuio_pci_open(struct uio_info *info, struct inode *inode)
struct pci_dev *dev = udev->pdev;
int err;
- mutex_lock(&udev->lock);
- if (++udev->refcnt > 1) {
- mutex_unlock(&udev->lock);
+ if (atomic_inc_return(&udev->refcnt) != 1)
return 0;
- }
/* set bus master, which was cleared by the reset function */
pci_set_master(dev);
/* enable interrupts */
err = igbuio_pci_enable_interrupts(udev);
- mutex_unlock(&udev->lock);
if (err) {
+ atomic_dec(&udev->refcnt);
dev_err(&dev->dev, "Enable interrupt fails\n");
- return err;
}
- return 0;
+ return err;
}
static int
@@ -363,19 +358,14 @@ igbuio_pci_release(struct uio_info *info, struct inode *inode)
struct rte_uio_pci_dev *udev = info->priv;
struct pci_dev *dev = udev->pdev;
- mutex_lock(&udev->lock);
- if (--udev->refcnt > 0) {
- mutex_unlock(&udev->lock);
- return 0;
- }
-
- /* disable interrupts */
- igbuio_pci_disable_interrupts(udev);
+ if (atomic_dec_and_test(&udev->refcnt)) {
+ /* disable interrupts */
+ igbuio_pci_disable_interrupts(udev);
- /* stop the device from further DMA */
- pci_clear_master(dev);
+ /* stop the device from further DMA */
+ pci_clear_master(dev);
+ }
- mutex_unlock(&udev->lock);
return 0;
}
@@ -496,7 +486,6 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (!udev)
return -ENOMEM;
- mutex_init(&udev->lock);
/*
* enable device: ask low-level code to enable I/O and
* memory
@@ -536,6 +525,7 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
udev->info.release = igbuio_pci_release;
udev->info.priv = udev;
udev->pdev = dev;
+ atomic_set(&udev->refcnt, 0);
err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
if (err != 0)
@@ -587,7 +577,6 @@ igbuio_pci_remove(struct pci_dev *dev)
{
struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);
- mutex_destroy(&udev->lock);
sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
uio_unregister_device(&udev->info);
igbuio_pci_release_iomem(&udev->info);
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
index aed14bcc..7a520ece 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb_ethtool.c
@@ -150,6 +150,7 @@ static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
+#ifndef ETHTOOL_GLINKSETTINGS
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -274,7 +275,9 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
#endif /* ETH_TP_MDI_X */
return 0;
}
+#endif
+#ifndef ETHTOOL_SLINKSETTINGS
static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -379,6 +382,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
clear_bit(__IGB_RESETTING, &adapter->state);
return 0;
}
+#endif
static u32 igb_get_link(struct net_device *netdev)
{
@@ -2752,8 +2756,12 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
#endif /* ETHTOOL_GRXRINGS */
static const struct ethtool_ops igb_ethtool_ops = {
+#ifndef ETHTOOL_GLINKSETTINGS
.get_settings = igb_get_settings,
+#endif
+#ifndef ETHTOOL_SLINKSETTINGS
.set_settings = igb_set_settings,
+#endif
.get_drvinfo = igb_get_drvinfo,
.get_regs_len = igb_get_regs_len,
.get_regs = igb_get_regs,
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
index 59415469..cc3f8ea4 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe.h
@@ -905,8 +905,10 @@ s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
#endif /* CONFIG_DCB */
extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
+#ifndef ETHTOOL_GLINKSETTINGS
extern int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd);
+#endif
extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
struct net_device *netdev, unsigned int vfn);
extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
index cdfcb959..1296829b 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
+++ b/lib/librte_eal/linuxapp/kni/ethtool/ixgbe/ixgbe_ethtool.c
@@ -173,6 +173,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
+#ifndef ETHTOOL_GLINKSETTINGS
int ixgbe_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -362,7 +363,9 @@ int ixgbe_get_settings(struct net_device *netdev,
return 0;
}
+#endif
+#ifndef ETHTOOL_SLINKSETTINGS
static int ixgbe_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -406,6 +409,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
}
return err;
}
+#endif
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -2830,8 +2834,12 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
#endif /* ETHTOOL_GRXRINGS */
//static
struct ethtool_ops ixgbe_ethtool_ops = {
+#ifndef ETHTOOL_GLINKSETTINGS
.get_settings = ixgbe_get_settings,
+#endif
+#ifndef ETHTOOL_SLINKSETTINGS
.set_settings = ixgbe_set_settings,
+#endif
.get_drvinfo = ixgbe_get_drvinfo,
.get_regs_len = ixgbe_get_regs_len,
.get_regs = ixgbe_get_regs,
diff --git a/lib/librte_eal/linuxapp/kni/kni_ethtool.c b/lib/librte_eal/linuxapp/kni/kni_ethtool.c
index 0c88589c..8174e98d 100644
--- a/lib/librte_eal/linuxapp/kni/kni_ethtool.c
+++ b/lib/librte_eal/linuxapp/kni/kni_ethtool.c
@@ -46,6 +46,8 @@ kni_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
priv->lad_dev->ethtool_ops->get_drvinfo(priv->lad_dev, info);
}
+/* ETHTOOL_GLINKSETTINGS replaces ETHTOOL_GSET */
+#ifndef ETHTOOL_GLINKSETTINGS
static int
kni_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
@@ -53,7 +55,10 @@ kni_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return priv->lad_dev->ethtool_ops->get_settings(priv->lad_dev, ecmd);
}
+#endif
+/* ETHTOOL_SLINKSETTINGS replaces ETHTOOL_SSET */
+#ifndef ETHTOOL_SLINKSETTINGS
static int
kni_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
@@ -61,6 +66,7 @@ kni_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return priv->lad_dev->ethtool_ops->set_settings(priv->lad_dev, ecmd);
}
+#endif
static void
kni_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -209,8 +215,12 @@ kni_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats,
struct ethtool_ops kni_ethtool_ops = {
.begin = kni_check_if_running,
.get_drvinfo = kni_get_drvinfo,
+#ifndef ETHTOOL_GLINKSETTINGS
.get_settings = kni_get_settings,
+#endif
+#ifndef ETHTOOL_SLINKSETTINGS
.set_settings = kni_set_settings,
+#endif
.get_regs_len = kni_get_regs_len,
.get_regs = kni_get_regs,
.get_wol = kni_get_wol,
diff --git a/lib/librte_eal/linuxapp/kni/kni_fifo.h b/lib/librte_eal/linuxapp/kni/kni_fifo.h
index 14f4141f..26208ae4 100644
--- a/lib/librte_eal/linuxapp/kni/kni_fifo.h
+++ b/lib/librte_eal/linuxapp/kni/kni_fifo.h
@@ -27,6 +27,14 @@
#include <exec-env/rte_kni_common.h>
+/* Skip some memory barriers on Linux < 3.14 */
+#ifndef smp_load_acquire
+#define smp_load_acquire(a) (*(a))
+#endif
+#ifndef smp_store_release
+#define smp_store_release(a, b) *(a) = (b)
+#endif
+
/**
* Adds num elements into the fifo. Return the number actually written
*/
@@ -35,7 +43,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)
{
uint32_t i = 0;
uint32_t fifo_write = fifo->write;
- uint32_t fifo_read = fifo->read;
+ uint32_t fifo_read = smp_load_acquire(&fifo->read);
uint32_t new_write = fifo_write;
for (i = 0; i < num; i++) {
@@ -46,7 +54,7 @@ kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)
fifo->buffer[fifo_write] = data[i];
fifo_write = new_write;
}
- fifo->write = fifo_write;
+ smp_store_release(&fifo->write, fifo_write);
return i;
}
@@ -59,7 +67,7 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
{
uint32_t i = 0;
uint32_t new_read = fifo->read;
- uint32_t fifo_write = fifo->write;
+ uint32_t fifo_write = smp_load_acquire(&fifo->write);
for (i = 0; i < num; i++) {
if (new_read == fifo_write)
@@ -68,7 +76,7 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
data[i] = fifo->buffer[new_read];
new_read = (new_read + 1) & (fifo->len - 1);
}
- fifo->read = new_read;
+ smp_store_release(&fifo->read, new_read);
return i;
}
@@ -79,7 +87,9 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
static inline uint32_t
kni_fifo_count(struct rte_kni_fifo *fifo)
{
- return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+ uint32_t fifo_write = smp_load_acquire(&fifo->write);
+ uint32_t fifo_read = smp_load_acquire(&fifo->read);
+ return (fifo->len + fifo_write - fifo_read) & (fifo->len - 1);
}
/**
@@ -88,7 +98,9 @@ kni_fifo_count(struct rte_kni_fifo *fifo)
static inline uint32_t
kni_fifo_free_count(struct rte_kni_fifo *fifo)
{
- return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+ uint32_t fifo_write = smp_load_acquire(&fifo->write);
+ uint32_t fifo_read = smp_load_acquire(&fifo->read);
+ return (fifo_read - fifo_write - 1) & (fifo->len - 1);
}
#endif /* _KNI_FIFO_H_ */
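The smp_load_acquire()/smp_store_release() pairs above give the usual single-producer/single-consumer guarantee: the payload stores become visible before the updated write index, and the consumer sees the index before it reads the payload. Outside the kernel the same pairing can be expressed with C11 atomics; a minimal toy ring under that assumption (power-of-two length, one producer, one consumer):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define LEN 8 /* power of two, like the KNI fifo length */

struct spsc_fifo {
	_Atomic uint32_t write;
	_Atomic uint32_t read;
	void *buffer[LEN];
};

/* producer: store the payload, then release-publish the new write index */
static int fifo_put(struct spsc_fifo *f, void *data)
{
	uint32_t w = atomic_load_explicit(&f->write, memory_order_relaxed);
	uint32_t r = atomic_load_explicit(&f->read, memory_order_acquire);
	uint32_t new_w = (w + 1) & (LEN - 1);

	if (new_w == r)
		return 0; /* full */
	f->buffer[w] = data;
	atomic_store_explicit(&f->write, new_w, memory_order_release);
	return 1;
}

/* consumer: acquire the write index first, only then read the payload */
static int fifo_get(struct spsc_fifo *f, void **data)
{
	uint32_t r = atomic_load_explicit(&f->read, memory_order_relaxed);
	uint32_t w = atomic_load_explicit(&f->write, memory_order_acquire);

	if (r == w)
		return 0; /* empty */
	*data = f->buffer[r];
	atomic_store_explicit(&f->read, (r + 1) & (LEN - 1),
			      memory_order_release);
	return 1;
}

int main(void)
{
	struct spsc_fifo f = { 0 };
	int x = 42;
	void *p;

	fifo_put(&f, &x);
	if (fifo_get(&f, &p))
		printf("got %d\n", *(int *)p);
	return 0;
}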
diff --git a/lib/librte_efd/rte_efd.c b/lib/librte_efd/rte_efd.c
index 7d0b5cc6..6b611b55 100644
--- a/lib/librte_efd/rte_efd.c
+++ b/lib/librte_efd/rte_efd.c
@@ -721,7 +721,8 @@ rte_efd_create(const char *name, uint32_t max_num_rules, uint32_t key_len,
offline_cpu_socket, 0);
if (r == NULL) {
RTE_LOG(ERR, EFD, "memory allocation failed\n");
- goto error_unlock_exit;
+ rte_efd_free(table);
+ return NULL;
}
/* Populate free slots ring. Entry zero is reserved for key misses. */
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index eea11d06..096b35fa 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -507,6 +507,13 @@ rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be started before start any queue\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (rx_queue_id >= dev->data->nb_rx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
@@ -533,12 +540,6 @@ rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- if (!dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be started before start any queue\n", port_id);
- return -EINVAL;
- }
-
if (rx_queue_id >= dev->data->nb_rx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
return -EINVAL;
@@ -565,6 +566,13 @@ rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
+ if (!dev->data->dev_started) {
+ RTE_PMD_DEBUG_TRACE(
+ "port %d must be started before start any queue\n",
+ port_id);
+ return -EINVAL;
+ }
+
if (tx_queue_id >= dev->data->nb_tx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
@@ -591,12 +599,6 @@ rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- if (!dev->data->dev_started) {
- RTE_PMD_DEBUG_TRACE(
- "port %d must be started before start any queue\n", port_id);
- return -EINVAL;
- }
-
if (tx_queue_id >= dev->data->nb_tx_queues) {
RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
return -EINVAL;
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
index 47e37a6f..f252e411 100644
--- a/lib/librte_ether/rte_ethdev.h
+++ b/lib/librte_ether/rte_ethdev.h
@@ -944,12 +944,6 @@ struct rte_eth_conf {
};
/**
- * A structure used to retrieve the contextual information of
- * an Ethernet device, such as the controlling driver of the device,
- * its PCI context, etc...
- */
-
-/**
* RX offload capabilities of a device.
*/
#define DEV_RX_OFFLOAD_VLAN_STRIP 0x00000001
@@ -1010,6 +1004,12 @@ struct rte_pci_device;
/**
* Ethernet device information
*/
+
+/**
+ * A structure used to retrieve the contextual information of
+ * an Ethernet device, such as the controlling driver of the
+ * device, etc...
+ */
struct rte_eth_dev_info {
struct rte_pci_device *pci_dev; /**< Device PCI information. */
const char *driver_name; /**< Device Driver name. */
@@ -2884,6 +2884,7 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ uint16_t nb_rx;
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
@@ -2894,13 +2895,14 @@ rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
return 0;
}
#endif
- int16_t nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
- rx_pkts, nb_pkts);
+ nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+ rx_pkts, nb_pkts);
#ifdef RTE_ETHDEV_RXTX_CALLBACKS
- struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id];
+ if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) {
+ struct rte_eth_rxtx_callback *cb =
+ dev->post_rx_burst_cbs[queue_id];
- if (unlikely(cb != NULL)) {
do {
nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx,
nb_pkts, cb->param);
@@ -2935,7 +2937,7 @@ rte_eth_rx_queue_count(uint16_t port_id, uint16_t queue_id)
if (queue_id >= dev->data->nb_rx_queues)
return -EINVAL;
- return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+ return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id);
}
/**
@@ -3361,8 +3363,9 @@ rte_eth_tx_buffer_flush(uint16_t port_id, uint16_t queue_id,
/* All packets sent, or to be dealt with by callback below */
if (unlikely(sent != to_send))
- buffer->error_callback(&buffer->pkts[sent], to_send - sent,
- buffer->error_userdata);
+ buffer->error_callback(&buffer->pkts[sent],
+ (uint16_t)(to_send - sent),
+ buffer->error_userdata);
return sent;
}
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index d5c3fd56..3688b8e2 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -545,8 +545,8 @@ event_eth_rx_adapter_service_func(void *args)
if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
return 0;
if (!rx_adapter->rxa_started) {
- return 0;
rte_spinlock_unlock(&rx_adapter->rx_lock);
+ return 0;
}
eth_rx_poll(rx_adapter);
rte_spinlock_unlock(&rx_adapter->rx_lock);
@@ -900,7 +900,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
rx_adapter->conf_arg = conf_arg;
strcpy(rx_adapter->mem_name, mem_name);
rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
- rte_eth_dev_count() *
+ RTE_MAX_ETHPORTS *
sizeof(struct eth_device_info), 0,
socket_id);
rte_convert_rss_key((const uint32_t *)default_rss_key,
@@ -913,7 +913,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
return -ENOMEM;
}
rte_spinlock_init(&rx_adapter->rx_lock);
- for (i = 0; i < rte_eth_dev_count(); i++)
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++)
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
event_eth_rx_adapter[id] = rx_adapter;
diff --git a/lib/librte_gro/gro_tcp4.c b/lib/librte_gro/gro_tcp4.c
index 61a04232..d1c6c7de 100644
--- a/lib/librte_gro/gro_tcp4.c
+++ b/lib/librte_gro/gro_tcp4.c
@@ -343,7 +343,8 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
struct ipv4_hdr *ipv4_hdr;
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
- uint16_t tcp_dl, ip_id;
+ uint16_t ip_id;
+ int32_t tcp_dl;
struct tcp4_key key;
uint32_t cur_idx, prev_idx, item_idx;
@@ -360,10 +361,10 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
*/
if (tcp_hdr->tcp_flags != TCP_ACK_FLAG)
return -1;
- /* if payload length is 0, return immediately */
+ /* if payload length is less than or equal to 0, return immediately */
tcp_dl = rte_be_to_cpu_16(ipv4_hdr->total_length) - pkt->l3_len -
pkt->l4_len;
- if (tcp_dl == 0)
+ if (tcp_dl <= 0)
return -1;
ip_id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h
index 93188c29..5a0f2d46 100644
--- a/lib/librte_hash/rte_hash_crc.h
+++ b/lib/librte_hash/rte_hash_crc.h
@@ -367,14 +367,13 @@ crc32c_1word(uint32_t data, uint32_t init_val)
static inline uint32_t
crc32c_2words(uint64_t data, uint32_t init_val)
{
+ uint32_t crc, term1, term2;
union {
uint64_t u64;
uint32_t u32[2];
} d;
d.u64 = data;
- uint32_t crc, term1, term2;
-
crc = init_val;
crc ^= d.u32[0];
@@ -428,9 +427,9 @@ crc32c_sse42_u64_mimic(uint64_t data, uint64_t init_val)
} d;
d.u64 = data;
- init_val = crc32c_sse42_u32(d.u32[0], init_val);
- init_val = crc32c_sse42_u32(d.u32[1], init_val);
- return init_val;
+ init_val = crc32c_sse42_u32(d.u32[0], (uint32_t)init_val);
+ init_val = crc32c_sse42_u32(d.u32[1], (uint32_t)init_val);
+ return (uint32_t)init_val;
}
#endif
@@ -442,7 +441,7 @@ crc32c_sse42_u64(uint64_t data, uint64_t init_val)
"crc32q %[data], %[init_val];"
: [init_val] "+r" (init_val)
: [data] "rm" (data));
- return init_val;
+ return (uint32_t)init_val;
}
#endif
diff --git a/lib/librte_ip_frag/ip_frag_common.h b/lib/librte_ip_frag/ip_frag_common.h
index 9f561965..f3abc98b 100644
--- a/lib/librte_ip_frag/ip_frag_common.h
+++ b/lib/librte_ip_frag/ip_frag_common.h
@@ -81,28 +81,23 @@ struct rte_mbuf *ipv6_frag_reassemble(struct ip_frag_pkt *fp);
static inline int
ip_frag_key_is_empty(const struct ip_frag_key * key)
{
- uint32_t i;
- for (i = 0; i < RTE_MIN(key->key_len, RTE_DIM(key->src_dst)); i++)
- if (key->src_dst[i] != 0)
- return 0;
- return 1;
+ return (key->key_len == 0);
}
-/* empty the key */
+/* invalidate the key */
static inline void
ip_frag_key_invalidate(struct ip_frag_key * key)
{
- uint32_t i;
- for (i = 0; i < key->key_len; i++)
- key->src_dst[i] = 0;
+ key->key_len = 0;
}
/* compare two keys */
-static inline int
+static inline uint64_t
ip_frag_key_cmp(const struct ip_frag_key * k1, const struct ip_frag_key * k2)
{
- uint32_t i, val;
- val = k1->id ^ k2->id;
+ uint32_t i;
+ uint64_t val;
+ val = k1->id_key_len ^ k2->id_key_len;
for (i = 0; i < k1->key_len; i++)
val |= k1->src_dst[i] ^ k2->src_dst[i];
return val;
diff --git a/lib/librte_ip_frag/rte_ip_frag.h b/lib/librte_ip_frag/rte_ip_frag.h
index 01441641..407991b9 100644
--- a/lib/librte_ip_frag/rte_ip_frag.h
+++ b/lib/librte_ip_frag/rte_ip_frag.h
@@ -73,9 +73,17 @@ struct ip_frag {
/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
struct ip_frag_key {
- uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
- uint32_t id; /**< dst address */
- uint32_t key_len; /**< src/dst key length */
+ uint64_t src_dst[4];
+ /**< src and dst address, only first 8 bytes used for IPv4 */
+ RTE_STD_C11
+ union {
+ uint64_t id_key_len; /**< combined for easy fetch */
+ __extension__
+ struct {
+ uint32_t id; /**< packet id */
+ uint32_t key_len; /**< src/dst key length */
+ };
+ };
};
/**
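Packing 'id' and 'key_len' into the single 64-bit 'id_key_len' above lets ip_frag_key_cmp() compare both fields with one XOR and lets ip_frag_key_is_empty() test only key_len. A simplified stand-alone model of that layout (the field names mirror the hunk, everything else is invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* cut-down model of the reworked ip_frag_key */
struct frag_key {
	uint64_t src_dst[4];
	union {
		uint64_t id_key_len; /* id and key_len in one 64-bit word */
		struct {
			uint32_t id;
			uint32_t key_len;
		};
	};
};

static uint64_t key_cmp(const struct frag_key *a, const struct frag_key *b)
{
	uint64_t diff = a->id_key_len ^ b->id_key_len; /* covers id and key_len */
	uint32_t i;

	for (i = 0; i < a->key_len; i++)
		diff |= a->src_dst[i] ^ b->src_dst[i];
	return diff; /* zero means the keys match, as in ip_frag_key_cmp() */
}

int main(void)
{
	struct frag_key k1, k2;

	memset(&k1, 0, sizeof(k1));
	k1.id = 7;
	k1.key_len = 1;
	k1.src_dst[0] = 0x0a000001;
	k2 = k1;
	printf("same key: %s\n", key_cmp(&k1, &k2) == 0 ? "yes" : "no");

	k2.id = 8;
	printf("after id change: %s\n", key_cmp(&k1, &k2) == 0 ? "yes" : "no");
	return 0;
}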
diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 4b45d1a0..fbd250da 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -532,7 +532,7 @@ int
rte_kni_handle_request(struct rte_kni *kni)
{
unsigned ret;
- struct rte_kni_request *req;
+ struct rte_kni_request *req = NULL;
if (kni == NULL)
return -1;
diff --git a/lib/librte_kvargs/rte_kvargs.c b/lib/librte_kvargs/rte_kvargs.c
index 854ac83f..c71ad4ab 100644
--- a/lib/librte_kvargs/rte_kvargs.c
+++ b/lib/librte_kvargs/rte_kvargs.c
@@ -160,6 +160,9 @@ rte_kvargs_process(const struct rte_kvargs *kvlist,
const struct rte_kvargs_pair *pair;
unsigned i;
+ if (kvlist == NULL)
+ return 0;
+
for (i = 0; i < kvlist->count; i++) {
pair = &kvlist->pairs[i];
if (key_match == NULL || strcmp(pair->key, key_match) == 0) {
diff --git a/lib/librte_kvargs/rte_kvargs.h b/lib/librte_kvargs/rte_kvargs.h
index 5821c726..53beed50 100644
--- a/lib/librte_kvargs/rte_kvargs.h
+++ b/lib/librte_kvargs/rte_kvargs.h
@@ -107,7 +107,7 @@ struct rte_kvargs *rte_kvargs_parse(const char *args,
* rte_kvargs_parse().
*
* @param kvlist
- * The rte_kvargs structure
+ * The rte_kvargs structure. No error if NULL.
*/
void rte_kvargs_free(struct rte_kvargs *kvlist);
@@ -116,11 +116,10 @@ void rte_kvargs_free(struct rte_kvargs *kvlist);
*
* For each key/value association that matches the given key, calls the
* handler function with the for a given arg_name passing the value on the
- * dictionary for that key and a given extra argument. If *kvlist* is NULL
- * function does nothing.
+ * dictionary for that key and a given extra argument.
*
* @param kvlist
- * The rte_kvargs structure
+ * The rte_kvargs structure. No error if NULL.
* @param key_match
* The key on which the handler should be called, or NULL to process handler
* on all associations
diff --git a/lib/librte_latencystats/rte_latencystats.c b/lib/librte_latencystats/rte_latencystats.c
index d6ad13c4..b038c815 100644
--- a/lib/librte_latencystats/rte_latencystats.c
+++ b/lib/librte_latencystats/rte_latencystats.c
@@ -153,8 +153,11 @@ add_time_stamps(uint16_t pid __rte_unused,
for (i = 0; i < nb_pkts; i++) {
diff_tsc = now - prev_tsc;
timer_tsc += diff_tsc;
- if (timer_tsc >= samp_intvl) {
+
+ if ((pkts[i]->ol_flags & PKT_RX_TIMESTAMP) == 0
+ && (timer_tsc >= samp_intvl)) {
pkts[i]->timestamp = now;
+ pkts[i]->ol_flags |= PKT_RX_TIMESTAMP;
timer_tsc = 0;
}
prev_tsc = now;
@@ -184,7 +187,7 @@ calc_latency(uint16_t pid __rte_unused,
now = rte_rdtsc();
for (i = 0; i < nb_pkts; i++) {
- if (pkts[i]->timestamp)
+ if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
latency[cnt++] = now - pkts[i]->timestamp;
}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 807a8d41..e6fd86f2 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -322,13 +322,17 @@ extern "C" {
* which can be set for packet.
*/
#define PKT_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_OUTER_IP_CKSUM | \
- PKT_TX_TCP_SEG | \
PKT_TX_IEEE1588_TMST | \
+ PKT_TX_TCP_SEG | \
PKT_TX_QINQ_PKT | \
- PKT_TX_VLAN_PKT | \
PKT_TX_TUNNEL_MASK | \
PKT_TX_MACSEC | \
PKT_TX_SEC_OFFLOAD)
@@ -755,7 +759,7 @@ rte_mbuf_refcnt_read(const struct rte_mbuf *m)
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
- rte_atomic16_set(&m->refcnt_atomic, new_value);
+ rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value);
}
/* internal */
@@ -785,8 +789,9 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
* reference counter can occur.
*/
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
- rte_mbuf_refcnt_set(m, 1 + value);
- return 1 + value;
+ ++value;
+ rte_mbuf_refcnt_set(m, (uint16_t)value);
+ return (uint16_t)value;
}
return __rte_mbuf_refcnt_update(m, value);
@@ -1139,7 +1144,8 @@ rte_pktmbuf_priv_size(struct rte_mempool *mp)
*/
static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
- m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
+ m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
+ (uint16_t)m->buf_len);
}
/**
@@ -1319,10 +1325,11 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
struct rte_mbuf *md = rte_mbuf_from_indirect(m);
struct rte_mempool *mp = m->pool;
- uint32_t mbuf_size, buf_len, priv_size;
+ uint32_t mbuf_size, buf_len;
+ uint16_t priv_size;
priv_size = rte_pktmbuf_priv_size(mp);
- mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
buf_len = rte_pktmbuf_data_room_size(mp);
m->priv_size = priv_size;
@@ -1659,7 +1666,10 @@ static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
if (unlikely(len > rte_pktmbuf_headroom(m)))
return NULL;
- m->data_off -= len;
+ /* NB: elaborating the subtraction like this instead of using
+ * -= allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
+ m->data_off = (uint16_t)(m->data_off - len);
m->data_len = (uint16_t)(m->data_len + len);
m->pkt_len = (m->pkt_len + len);
@@ -1719,8 +1729,11 @@ static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
if (unlikely(len > m->data_len))
return NULL;
+ /* NB: elaborating the addition like this instead of using
+ * += allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
m->data_len = (uint16_t)(m->data_len - len);
- m->data_off += len;
+ m->data_off = (uint16_t)(m->data_off + len);
m->pkt_len = (m->pkt_len - len);
return (char *)m->buf_addr + m->data_off;
}
@@ -1832,8 +1845,11 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
cur_tail = rte_pktmbuf_lastseg(head);
cur_tail->next = tail;
- /* accumulate number of segments and total length. */
- head->nb_segs += tail->nb_segs;
+ /* accumulate number of segments and total length.
+ * NB: elaborating the addition like this instead of using
+	 * += allows us to ensure the result type is uint16_t
+ * avoiding compiler warnings on gcc 8.1 at least */
+ head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
head->pkt_len += tail->pkt_len;
/* pkt_len is only set in the head */
@@ -1863,7 +1879,11 @@ rte_validate_tx_offload(const struct rte_mbuf *m)
return 0;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
- inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
+ /* NB: elaborating the addition like this instead of using
+ * += gives the result uint64_t type instead of int,
+ * avoiding compiler warnings on gcc 8.1 at least */
+ inner_l3_offset = inner_l3_offset + m->outer_l2_len +
+ m->outer_l3_len;
/* Headers are fragmented */
if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
@@ -1908,7 +1928,7 @@ rte_validate_tx_offload(const struct rte_mbuf *m)
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
- int seg_len, copy_len;
+ size_t seg_len, copy_len;
struct rte_mbuf *m;
struct rte_mbuf *m_next;
char *buffer;
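Several mbuf hunks above spell out uint16_t arithmetic with an explicit cast instead of '+='/'-='; the in-code comments attribute the warnings to gcc 8.1, since the compound operators promote both operands to int and then narrow the result back implicitly. A short illustration of the promotion and of the cast that documents the intentional narrowing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t data_off = 128;
	uint16_t len = 32;

	/* both operands are promoted to int, so the expression is int-sized */
	printf("the subtraction yields a value of size %zu\n",
	       sizeof(data_off - len));

	/* the explicit cast makes the int -> uint16_t narrowing intentional,
	 * as in the rte_pktmbuf_prepend()/adj() hunks */
	data_off = (uint16_t)(data_off - len);
	printf("data_off = %u\n", data_off);
	return 0;
}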
diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h
index 06d7b486..f7a9a869 100644
--- a/lib/librte_net/rte_ether.h
+++ b/lib/librte_net/rte_ether.h
@@ -239,7 +239,7 @@ static inline void eth_random_addr(uint8_t *addr)
uint8_t *p = (uint8_t *)&rand;
rte_memcpy(addr, p, ETHER_ADDR_LEN);
- addr[0] &= ~ETHER_GROUP_ADDR; /* clear multicast bit */
+ addr[0] &= (uint8_t)~ETHER_GROUP_ADDR; /* clear multicast bit */
addr[0] |= ETHER_LOCAL_ADMIN_ADDR; /* set local assignment bit */
}
@@ -353,11 +353,12 @@ static inline int rte_vlan_strip(struct rte_mbuf *m)
{
struct ether_hdr *eh
= rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct vlan_hdr *vh;
if (eh->ether_type != rte_cpu_to_be_16(ETHER_TYPE_VLAN))
return -1;
- struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1);
+ vh = (struct vlan_hdr *)(eh + 1);
m->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci);
diff --git a/lib/librte_net/rte_gre.h b/lib/librte_net/rte_gre.h
index 46568ff5..8a3414cf 100644
--- a/lib/librte_net/rte_gre.h
+++ b/lib/librte_net/rte_gre.h
@@ -43,6 +43,7 @@ extern "C" {
/**
* GRE Header
*/
+__extension__
struct gre_hdr {
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
uint16_t res2:4; /**< Reserved */
diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h
index 23468cb9..b22c1f80 100644
--- a/lib/librte_net/rte_ip.h
+++ b/lib/librte_net/rte_ip.h
@@ -284,7 +284,7 @@ rte_raw_cksum_mbuf(const struct rte_mbuf *m, uint32_t off, uint32_t len,
for (;;) {
tmp = __rte_raw_cksum(buf, seglen, 0);
if (done & 1)
- tmp = rte_bswap16(tmp);
+ tmp = rte_bswap16((uint16_t)tmp);
sum += tmp;
done += seglen;
if (done == len)
@@ -315,7 +315,7 @@ rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr)
{
uint16_t cksum;
cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr));
- return (cksum == 0xffff) ? cksum : ~cksum;
+ return (cksum == 0xffff) ? cksum : (uint16_t)~cksum;
}
/**
@@ -380,8 +380,8 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
uint32_t cksum;
uint32_t l4_len;
- l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) -
- sizeof(struct ipv4_hdr);
+ l4_len = (uint32_t)(rte_be_to_cpu_16(ipv4_hdr->total_length) -
+ sizeof(struct ipv4_hdr));
cksum = rte_raw_cksum(l4_hdr, l4_len);
cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0);
@@ -391,7 +391,7 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr)
if (cksum == 0)
cksum = 0xffff;
- return cksum;
+ return (uint16_t)cksum;
}
/**
@@ -437,7 +437,7 @@ rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags)
uint32_t proto; /* L4 protocol - top 3 bytes must be zero */
} psd_hdr;
- psd_hdr.proto = (ipv6_hdr->proto << 24);
+ psd_hdr.proto = (uint32_t)(ipv6_hdr->proto << 24);
if (ol_flags & PKT_TX_TCP_SEG) {
psd_hdr.len = 0;
} else {
@@ -480,7 +480,7 @@ rte_ipv6_udptcp_cksum(const struct ipv6_hdr *ipv6_hdr, const void *l4_hdr)
if (cksum == 0)
cksum = 0xffff;
- return cksum;
+ return (uint16_t)cksum;
}
#ifdef __cplusplus
diff --git a/lib/librte_net/rte_net.h b/lib/librte_net/rte_net.h
index 79c764ad..5e80dd89 100644
--- a/lib/librte_net/rte_net.h
+++ b/lib/librte_net/rte_net.h
@@ -124,14 +124,16 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
(ol_flags & PKT_TX_OUTER_IPV6))
inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
- if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
- if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
- inner_l3_offset);
+ if (ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ inner_l3_offset);
- if (ol_flags & PKT_TX_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
+ if (ol_flags & PKT_TX_IP_CKSUM)
+ ipv4_hdr->hdr_checksum = 0;
+ }
+ if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
+ if (ol_flags & PKT_TX_IPV4) {
udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
m->l3_len);
udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
@@ -148,12 +150,6 @@ rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
} else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
(ol_flags & PKT_TX_TCP_SEG)) {
if (ol_flags & PKT_TX_IPV4) {
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
- inner_l3_offset);
-
- if (ol_flags & PKT_TX_IP_CKSUM)
- ipv4_hdr->hdr_checksum = 0;
-
/* non-TSO tcp or TSO */
tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
m->l3_len);
diff --git a/lib/librte_pci/rte_pci.c b/lib/librte_pci/rte_pci.c
index 0160fc1e..ac0b1a6f 100644
--- a/lib/librte_pci/rte_pci.c
+++ b/lib/librte_pci/rte_pci.c
@@ -59,6 +59,10 @@ get_u8_pciaddr_field(const char *in, void *_u8, char dlm)
uint8_t *u8 = _u8;
char *end;
+ /* empty string is an error though strtoul() returns 0 */
+ if (*in == '\0')
+ return NULL;
+
errno = 0;
val = strtoul(in, &end, 16);
if (errno != 0 || end[0] != dlm || val > UINT8_MAX) {
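The early return added above matters because strtoul() on an empty string reports no error: it returns 0, leaves errno untouched and sets the end pointer to the start of the string, so the empty string has to be rejected explicitly, as the added comment notes. A short demonstration of that behaviour:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *in = "";
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(in, &end, 16);
	printf("val=%lu errno=%d consumed=%td chars\n", val, errno, end - in);
	/* prints val=0 errno=0 consumed=0: the caller must reject "" itself */
	return 0;
}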
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
index fc433b05..978c4dd1 100644
--- a/lib/librte_ring/rte_ring.h
+++ b/lib/librte_ring/rte_ring.h
@@ -396,7 +396,7 @@ update_tail(struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
static __rte_always_inline unsigned int
-__rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
+__rte_ring_move_prod_head(struct rte_ring *r, unsigned int is_sp,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *free_entries)
@@ -416,14 +416,13 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
*/
rte_smp_rmb();
- const uint32_t cons_tail = r->cons.tail;
/*
* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* *old_head > cons_tail). So 'free_entries' is always between 0
* and capacity (which is < size).
*/
- *free_entries = (capacity + cons_tail - *old_head);
+ *free_entries = (capacity + r->cons.tail - *old_head);
/* check that we have enough room in ring */
if (unlikely(n > *free_entries))
@@ -466,7 +465,7 @@ __rte_ring_move_prod_head(struct rte_ring *r, int is_sp,
static __rte_always_inline unsigned int
__rte_ring_do_enqueue(struct rte_ring *r, void * const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- int is_sp, unsigned int *free_space)
+ unsigned int is_sp, unsigned int *free_space)
{
uint32_t prod_head, prod_next;
uint32_t free_entries;
@@ -510,7 +509,7 @@ end:
* If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
*/
static __rte_always_inline unsigned int
-__rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
+__rte_ring_move_cons_head(struct rte_ring *r, unsigned int is_sc,
unsigned int n, enum rte_ring_queue_behavior behavior,
uint32_t *old_head, uint32_t *new_head,
uint32_t *entries)
@@ -530,12 +529,11 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
*/
rte_smp_rmb();
- const uint32_t prod_tail = r->prod.tail;
/* The subtraction is done between two unsigned 32bits value
* (the result is always modulo 32 bits even if we have
* cons_head > prod_tail). So 'entries' is always between 0
* and size(ring)-1. */
- *entries = (prod_tail - *old_head);
+ *entries = (r->prod.tail - *old_head);
/* Set the actual entries for dequeue */
if (n > *entries)
@@ -577,7 +575,7 @@ __rte_ring_move_cons_head(struct rte_ring *r, int is_sc,
static __rte_always_inline unsigned int
__rte_ring_do_dequeue(struct rte_ring *r, void **obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior,
- int is_sc, unsigned int *available)
+ unsigned int is_sc, unsigned int *available)
{
uint32_t cons_head, cons_next;
uint32_t entries;
diff --git a/lib/librte_sched/rte_reciprocal.c b/lib/librte_sched/rte_reciprocal.c
index 652f0237..385109cd 100644
--- a/lib/librte_sched/rte_reciprocal.c
+++ b/lib/librte_sched/rte_reciprocal.c
@@ -38,28 +38,13 @@
#include "rte_reciprocal.h"
-/* find largest set bit.
- * portable and slow but does not matter for this usage.
- */
-static inline int fls(uint32_t x)
-{
- int b;
-
- for (b = 31; b >= 0; --b) {
- if (x & (1u << b))
- return b + 1;
- }
-
- return 0;
-}
-
struct rte_reciprocal rte_reciprocal_value(uint32_t d)
{
struct rte_reciprocal R;
uint64_t m;
int l;
- l = fls(d - 1);
+ l = rte_fls_u32(d - 1);
m = ((1ULL << 32) * ((1ULL << l) - d));
m /= d;
diff --git a/lib/librte_table/rte_table_hash_cuckoo.c b/lib/librte_table/rte_table_hash_cuckoo.c
index f3845c75..c6522825 100644
--- a/lib/librte_table/rte_table_hash_cuckoo.c
+++ b/lib/librte_table/rte_table_hash_cuckoo.c
@@ -136,7 +136,7 @@ rte_table_hash_cuckoo_create(void *params,
struct rte_hash_parameters hash_cuckoo_params = {
.entries = p->n_keys,
.key_len = p->key_size,
- .hash_func = (rte_hash_function)(p->f_hash),
+ .hash_func = (rte_hash_function)(void *)(p->f_hash),
.hash_func_init_val = p->seed,
.socket_id = socket_id,
.name = p->name
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 0eb5e0d6..9ab37ae5 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -1204,7 +1204,7 @@ read_vhost_message(int sockfd, struct VhostUserMsg *msg)
if (ret <= 0)
return ret;
- if (msg && msg->size) {
+ if (msg->size) {
if (msg->size > sizeof(msg->payload)) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid msg size: %d\n", msg->size);
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index bde360f3..8c0c1b4b 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -144,7 +144,7 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
- uint16_t desc_idx, uint16_t len)
+ uint16_t desc_idx, uint32_t len)
{
uint16_t i = vq->shadow_used_idx++;
@@ -560,7 +560,7 @@ static __rte_always_inline int
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint32_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
- uint16_t *desc_chain_len)
+ uint32_t *desc_chain_len)
{
uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
uint32_t vec_id = *vec_idx;
@@ -635,7 +635,7 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t tries = 0;
uint16_t head_idx = 0;
- uint16_t len = 0;
+ uint32_t len = 0;
*num_buffers = 0;
cur_idx = vq->last_avail_idx;
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index 6a6a7452..98ab58de 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -218,6 +218,9 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt
ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_EAL_NUMA_AWARE_HUGEPAGES),yy)
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lnuma
endif
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_USE_LIBBSD),yy)
+_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lbsd
+endif
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm
_LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt
_LDLIBS-$(CONFIG_RTE_LIBRTE_MEMBER) += -lm
diff --git a/mk/rte.cpuflags.mk b/mk/rte.cpuflags.mk
index a813c91f..2129510c 100644
--- a/mk/rte.cpuflags.mk
+++ b/mk/rte.cpuflags.mk
@@ -96,6 +96,11 @@ endif
ifneq ($(filter $(AUTO_CPUFLAGS),__AVX512F__),)
ifeq ($(CONFIG_RTE_ENABLE_AVX512),y)
CPUFLAGS += AVX512F
+else
+# disable AVX512F support of gcc as a workaround for Bug 97
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+MACHINE_CFLAGS += -mno-avx512f
+endif
endif
endif
@@ -117,7 +122,7 @@ CPUFLAGS += VSX
endif
# ARM flags
-ifneq ($(filter $(AUTO_CPUFLAGS),__ARM_NEON),)
+ifneq ($(filter __ARM_NEON __aarch64__,$(AUTO_CPUFLAGS)),)
CPUFLAGS += NEON
endif
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index f2486b9a..9485ced5 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -30,7 +30,7 @@
# OF THE POSSIBILITY OF SUCH DAMAGE.
Name: dpdk
-Version: 17.11.4
+Version: 17.11.5
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org
diff --git a/test/test/test_acl.h b/test/test/test_acl.h
index 421f3109..6f5c485a 100644
--- a/test/test/test_acl.h
+++ b/test/test/test_acl.h
@@ -109,34 +109,40 @@ enum {
struct rte_acl_ipv4vlan_rule invalid_layout_rules[] = {
/* test src and dst address */
{
- .data = {.userdata = 1, .category_mask = 1},
+ .data = {.userdata = 1, .category_mask = 1,
+ .priority = 1},
.src_addr = IPv4(10,0,0,0),
.src_mask_len = 24,
},
{
- .data = {.userdata = 2, .category_mask = 1},
+ .data = {.userdata = 2, .category_mask = 1,
+ .priority = 1},
.dst_addr = IPv4(10,0,0,0),
.dst_mask_len = 24,
},
/* test src and dst ports */
{
- .data = {.userdata = 3, .category_mask = 1},
+ .data = {.userdata = 3, .category_mask = 1,
+ .priority = 1},
.dst_port_low = 100,
.dst_port_high = 100,
},
{
- .data = {.userdata = 4, .category_mask = 1},
+ .data = {.userdata = 4, .category_mask = 1,
+ .priority = 1},
.src_port_low = 100,
.src_port_high = 100,
},
/* test proto */
{
- .data = {.userdata = 5, .category_mask = 1},
+ .data = {.userdata = 5, .category_mask = 1,
+ .priority = 1},
.proto = 0xf,
.proto_mask = 0xf
},
{
- .data = {.userdata = 6, .category_mask = 1},
+ .data = {.userdata = 6, .category_mask = 1,
+ .priority = 1},
.dst_port_low = 0xf,
.dst_port_high = 0xf,
}
diff --git a/test/test/test_bitmap.c b/test/test/test_bitmap.c
index 7045d332..e8c3a780 100644
--- a/test/test/test_bitmap.c
+++ b/test/test/test_bitmap.c
@@ -129,6 +129,7 @@ test_bitmap_slab_set_get(struct rte_bitmap *bmp)
static int
test_bitmap_set_get_clear(struct rte_bitmap *bmp)
{
+ uint64_t val;
int i;
rte_bitmap_reset(bmp);
@@ -152,6 +153,23 @@ test_bitmap_set_get_clear(struct rte_bitmap *bmp)
}
}
+ rte_bitmap_reset(bmp);
+
+ /* Alternate slab set test */
+ for (i = 0; i < MAX_BITS; i++) {
+ if (i % RTE_BITMAP_SLAB_BIT_SIZE)
+ rte_bitmap_set(bmp, i);
+ }
+
+ for (i = 0; i < MAX_BITS; i++) {
+ val = rte_bitmap_get(bmp, i);
+ if (((i % RTE_BITMAP_SLAB_BIT_SIZE) && !val) ||
+ (!(i % RTE_BITMAP_SLAB_BIT_SIZE) && val)) {
+ printf("Failed to get set bit.\n");
+ return TEST_FAILED;
+ }
+ }
+
return TEST_SUCCESS;
}
diff --git a/test/test/test_common.c b/test/test/test_common.c
index ae3482da..da2ff75b 100644
--- a/test/test/test_common.c
+++ b/test/test/test_common.c
@@ -180,6 +180,37 @@ test_log2(void)
}
static int
+test_fls(void)
+{
+ struct fls_test_vector {
+ uint32_t arg;
+ int rc;
+ };
+ int expected, rc;
+ uint32_t i, arg;
+
+ const struct fls_test_vector test[] = {
+ {0x0, 0},
+ {0x1, 1},
+ {0x4000, 15},
+ {0x80000000, 32},
+ };
+
+ for (i = 0; i < RTE_DIM(test); i++) {
+ arg = test[i].arg;
+ rc = rte_fls_u32(arg);
+ expected = test[i].rc;
+ if (rc != expected) {
+ printf("Wrong rte_fls_u32(0x%x) rc=%d, expected=%d\n",
+ arg, rc, expected);
+ return TEST_FAILED;
+ }
+ }
+
+ return 0;
+}
+
+static int
test_common(void)
{
int ret = 0;
@@ -187,6 +218,7 @@ test_common(void)
ret |= test_macros(0);
ret |= test_misc();
ret |= test_log2();
+ ret |= test_fls();
return ret;
}
diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
index 28f982f4..5136daf9 100644
--- a/test/test/test_cryptodev.c
+++ b/test/test/test_cryptodev.c
@@ -608,7 +608,7 @@ test_device_configure_invalid_queue_pair_ids(void)
/* valid - max value queue pairs */
- ts_params->conf.nb_queue_pairs = MAX_NUM_QPS_PER_QAT_DEVICE;
+ ts_params->conf.nb_queue_pairs = orig_nb_qps;
TEST_ASSERT_SUCCESS(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf),
@@ -640,7 +640,7 @@ test_device_configure_invalid_queue_pair_ids(void)
/* invalid - max value + 1 queue pairs */
- ts_params->conf.nb_queue_pairs = MAX_NUM_QPS_PER_QAT_DEVICE + 1;
+ ts_params->conf.nb_queue_pairs = orig_nb_qps + 1;
TEST_ASSERT_FAIL(rte_cryptodev_configure(ts_params->valid_devs[0],
&ts_params->conf),
@@ -792,7 +792,7 @@ test_queue_pair_descriptor_setup(void)
/* test invalid queue pair id */
qp_conf.nb_descriptors = DEFAULT_NUM_OPS_INFLIGHT; /*valid */
- qp_id = DEFAULT_NUM_QPS_PER_QAT_DEVICE; /*invalid */
+ qp_id = ts_params->conf.nb_queue_pairs; /*invalid */
TEST_ASSERT_FAIL(rte_cryptodev_queue_pair_setup(
ts_params->valid_devs[0],
diff --git a/test/test/test_eventdev.c b/test/test/test_eventdev.c
index 1ed2a1dd..d51f94c3 100644
--- a/test/test/test_eventdev.c
+++ b/test/test/test_eventdev.c
@@ -218,15 +218,18 @@ test_eventdev_configure(void)
"Config negative test failed");
TEST_ASSERT_EQUAL(-EINVAL,
test_ethdev_config_run(&dev_conf, &info, max_event_queue_flows),
- "Config negative test failed");
- TEST_ASSERT_EQUAL(-EINVAL,
- test_ethdev_config_run(&dev_conf, &info,
- max_event_port_dequeue_depth),
- "Config negative test failed");
- TEST_ASSERT_EQUAL(-EINVAL,
- test_ethdev_config_run(&dev_conf, &info,
- max_event_port_enqueue_depth),
- "Config negative test failed");
+ "Config negative test failed");
+
+ if (info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) {
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info,
+ max_event_port_dequeue_depth),
+ "Config negative test failed");
+ TEST_ASSERT_EQUAL(-EINVAL,
+ test_ethdev_config_run(&dev_conf, &info,
+ max_event_port_enqueue_depth),
+ "Config negative test failed");
+ }
/* Positive case */
devconf_set_default_sane_values(&dev_conf, &info);
diff --git a/test/test/test_hash.c b/test/test/test_hash.c
index 4668cf1a..2218142a 100644
--- a/test/test/test_hash.c
+++ b/test/test/test_hash.c
@@ -109,29 +109,23 @@ static uint32_t pseudo_hash(__attribute__((unused)) const void *keys,
return 3;
}
+#define UNIT_TEST_HASH_VERBOSE 0
/*
* Print out result of unit test hash operation.
*/
-#if defined(UNIT_TEST_HASH_VERBOSE)
static void print_key_info(const char *msg, const struct flow_key *key,
int32_t pos)
{
- uint8_t *p = (uint8_t *)key;
- unsigned i;
-
- printf("%s key:0x", msg);
- for (i = 0; i < sizeof(struct flow_key); i++) {
- printf("%02X", p[i]);
+ if (UNIT_TEST_HASH_VERBOSE) {
+ const uint8_t *p = (const uint8_t *)key;
+ unsigned int i;
+
+ printf("%s key:0x", msg);
+ for (i = 0; i < sizeof(struct flow_key); i++)
+ printf("%02X", p[i]);
+ printf(" @ pos %d\n", pos);
}
- printf(" @ pos %d\n", pos);
-}
-#else
-static void print_key_info(__attribute__((unused)) const char *msg,
- __attribute__((unused)) const struct flow_key *key,
- __attribute__((unused)) int32_t pos)
-{
}
-#endif
/* Keys used by unit test functions */
static struct flow_key keys[5] = { {
diff --git a/test/test/test_hash_perf.c b/test/test/test_hash_perf.c
index c0051b20..b0514b10 100644
--- a/test/test/test_hash_perf.c
+++ b/test/test/test_hash_perf.c
@@ -49,7 +49,8 @@
#define MAX_ENTRIES (1 << 19)
#define KEYS_TO_ADD (MAX_ENTRIES * 3 / 4) /* 75% table utilization */
#define NUM_LOOKUPS (KEYS_TO_ADD * 5) /* Loop among keys added, several times */
-#define BUCKET_SIZE 4
+/* BUCKET_SIZE should be the same as RTE_HASH_BUCKET_ENTRIES in the rte_hash library */
+#define BUCKET_SIZE 8
#define NUM_BUCKETS (MAX_ENTRIES / BUCKET_SIZE)
#define MAX_KEYSIZE 64
#define NUM_KEYSIZES 10
diff --git a/test/test/test_pmd_ring_perf.c b/test/test/test_pmd_ring_perf.c
index 8e9cd331..e4467668 100644
--- a/test/test/test_pmd_ring_perf.c
+++ b/test/test/test_pmd_ring_perf.c
@@ -39,6 +39,7 @@
#include <rte_launch.h>
#include <rte_ethdev.h>
#include <rte_eth_ring.h>
+#include <rte_bus_vdev.h>
#include "test.h"
@@ -164,6 +165,8 @@ test_bulk_enqueue_dequeue(void)
static int
test_ring_pmd_perf(void)
{
+ char name[RTE_ETH_NAME_MAX_LEN];
+
r = rte_ring_create(RING_NAME, RING_SIZE, rte_socket_id(),
RING_F_SP_ENQ|RING_F_SC_DEQ);
if (r == NULL && (r = rte_ring_lookup(RING_NAME)) == NULL)
@@ -180,6 +183,11 @@ test_ring_pmd_perf(void)
printf("\n### Testing using a single lcore ###\n");
test_bulk_enqueue_dequeue();
+ /* release port and ring resources */
+ rte_eth_dev_stop(ring_ethdev_port);
+ rte_eth_dev_get_name_by_port(ring_ethdev_port, name);
+ rte_vdev_uninit(name);
+ rte_ring_free(r);
return 0;
}
diff --git a/test/test/test_reorder.c b/test/test/test_reorder.c
index 6f2c2306..d5f9d229 100644
--- a/test/test/test_reorder.c
+++ b/test/test/test_reorder.c
@@ -298,7 +298,7 @@ test_reorder_drain(void)
goto exit;
}
if (robufs[0] != NULL)
- rte_pktmbuf_free(robufs[i]);
+ rte_pktmbuf_free(robufs[0]);
/* Insert more packets
* RB[] = {NULL, NULL, NULL, NULL}
diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index df9b21a6..05711fdc 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -705,6 +705,13 @@ def do_arg_actions():
def main():
'''program main function'''
+ # check if lspci is installed, suppress any output
+ with open(os.devnull, 'w') as devnull:
+ ret = subprocess.call(['which', 'lspci'],
+ stdout=devnull, stderr=devnull)
+ if ret != 0:
+ print("'lspci' not found - please install 'pciutils'")
+ sys.exit(1)
parse_args()
check_modules()
clear_data()