author     Luca Boccassi <luca.boccassi@gmail.com>  2017-08-30 15:07:26 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2017-08-30 15:08:21 +0100
commit     fdd2322bb45e83d3fd96b06ea32a4afbb60bcb6f (patch)
tree       c28b354e5687537e9e5729c625a378b9f46cab37
parent     aab0c291a90f701b60f8c9a88cbcc265cba0ec8b (diff)
New upstream version 16.11.3
Change-Id: Ieb0ca817d9390060d3f128db691dc76bc0494727
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
-rw-r--r--  app/test/test_cryptodev.c | 20
-rw-r--r--  app/test/test_cryptodev_hash_test_vectors.h | 36
-rw-r--r--  app/test/test_cryptodev_perf.c | 15
-rw-r--r--  app/test/test_link_bonding.c | 66
-rw-r--r--  app/test/test_link_bonding_mode4.c | 8
-rw-r--r--  app/test/test_link_bonding_rssconf.c | 2
-rw-r--r--  doc/guides/cryptodevs/aesni_mb.rst | 1
-rw-r--r--  doc/guides/cryptodevs/qat.rst | 1
-rw-r--r--  doc/guides/rel_notes/release_16_11.rst | 84
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 24
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 24
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 6
-rw-r--r--  drivers/crypto/qat/qat_crypto.c | 26
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c | 76
-rw-r--r--  drivers/net/bonding/rte_eth_bond_8023ad.c | 4
-rw-r--r--  drivers/net/bonding/rte_eth_bond_pmd.c | 29
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c | 20
-rw-r--r--  drivers/net/cxgbe/base/t4_regs.h | 18
-rw-r--r--  drivers/net/cxgbe/cxgbe_ethdev.c | 4
-rw-r--r--  drivers/net/e1000/e1000_ethdev.h | 2
-rw-r--r--  drivers/net/e1000/igb_ethdev.c | 29
-rw-r--r--  drivers/net/e1000/igb_rxtx.c | 6
-rw-r--r--  drivers/net/ena/ena_ethdev.c | 8
-rw-r--r--  drivers/net/enic/base/vnic_dev.c | 4
-rw-r--r--  drivers/net/enic/enic_rxtx.c | 3
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c | 3
-rw-r--r--  drivers/net/i40e/base/i40e_register.h | 2
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c | 77
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h | 5
-rw-r--r--  drivers/net/i40e/i40e_fdir.c | 8
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c | 2
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c | 7
-rw-r--r--  drivers/net/mlx4/mlx4.c | 19
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c | 28
-rw-r--r--  drivers/net/mlx5/mlx5_fdir.c | 7
-rw-r--r--  drivers/net/mlx5/mlx5_rxq.c | 14
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c | 16
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 5
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c | 34
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c | 1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethdev.c | 5
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c | 6
-rw-r--r--  examples/l2fwd-crypto/main.c | 40
-rw-r--r--  examples/l3fwd/l3fwd_em.c | 2
-rw-r--r--  examples/qos_sched/main.h | 5
-rw-r--r--  lib/librte_cryptodev/rte_cryptodev.c | 2
-rw-r--r--  lib/librte_eal/bsdapp/contigmem/contigmem.c | 197
-rw-r--r--  lib/librte_eal/common/eal_common_proc.c | 8
-rw-r--r--  lib/librte_eal/common/include/rte_version.h | 2
-rw-r--r--  lib/librte_eal/common/malloc_elem.c | 9
-rw-r--r--  lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h | 2
-rw-r--r--  lib/librte_lpm/rte_lpm.c | 4
-rw-r--r--  lib/librte_mbuf/rte_mbuf.h | 5
-rw-r--r--  lib/librte_mbuf/rte_mbuf_ptype.h | 4
-rw-r--r--  lib/librte_vhost/vhost_user.c | 4
-rw-r--r--  lib/librte_vhost/virtio_net.c | 37
-rw-r--r--  pkg/dpdk.spec | 2
57 files changed, 771 insertions(+), 307 deletions(-)
diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index 872f8b43..b544ab9c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -1545,6 +1545,22 @@ test_authonly_openssl_all(void)
return TEST_SUCCESS;
}
+static int
+test_authonly_qat_all(void)
+{
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+ int status;
+
+ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
+ ts_params->op_mpool, ts_params->valid_devs[0],
+ RTE_CRYPTODEV_QAT_SYM_PMD,
+ BLKCIPHER_AUTHONLY_TYPE);
+
+ TEST_ASSERT_EQUAL(status, 0, "Test failed");
+
+ return TEST_SUCCESS;
+}
+
/* ***** SNOW 3G Tests ***** */
static int
create_wireless_algo_hash_session(uint8_t dev_id,
@@ -2944,8 +2960,7 @@ test_snow3g_encryption_offset_oop(const struct snow3g_test_data *tdata)
rte_hexdump(stdout, "ciphertext:", ciphertext, plaintext_len);
#endif
- expected_ciphertext_shifted = rte_malloc(NULL,
- ceil_byte_length(plaintext_len + extra_offset), 0);
+ expected_ciphertext_shifted = rte_malloc(NULL, plaintext_len, 8);
TEST_ASSERT_NOT_NULL(expected_ciphertext_shifted,
"failed to reserve memory for ciphertext shifted\n");
@@ -6035,6 +6050,7 @@ static struct unit_test_suite cryptodev_qat_testsuite = {
TEST_CASE_ST(ut_setup, ut_teardown, test_3DES_chain_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown,
test_3DES_cipheronly_qat_all),
+ TEST_CASE_ST(ut_setup, ut_teardown, test_authonly_qat_all),
TEST_CASE_ST(ut_setup, ut_teardown, test_stats),
/** AES GCM Authenticated Encryption */
diff --git a/app/test/test_cryptodev_hash_test_vectors.h b/app/test/test_cryptodev_hash_test_vectors.h
index 9f095cf3..1daf681a 100644
--- a/app/test/test_cryptodev_hash_test_vectors.h
+++ b/app/test/test_cryptodev_hash_test_vectors.h
@@ -358,13 +358,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-MD5 Digest",
.test_data = &hmac_md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-MD5 Digest Verify",
.test_data = &hmac_md5_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA1 Digest",
@@ -382,13 +384,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA1 Digest",
.test_data = &hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA1 Digest Verify",
.test_data = &hmac_sha1_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA224 Digest",
@@ -406,13 +410,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA224 Digest",
.test_data = &hmac_sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA224 Digest Verify",
.test_data = &hmac_sha224_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA256 Digest",
@@ -430,13 +436,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA256 Digest",
.test_data = &hmac_sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA256 Digest Verify",
.test_data = &hmac_sha256_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA384 Digest",
@@ -454,13 +462,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA384 Digest",
.test_data = &hmac_sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA384 Digest Verify",
.test_data = &hmac_sha384_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "SHA512 Digest",
@@ -478,13 +488,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
.test_descr = "HMAC-SHA512 Digest",
.test_data = &hmac_sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
{
.test_descr = "HMAC-SHA512 Digest Verify",
.test_data = &hmac_sha512_test_vector,
.op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT
},
};
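
Each blockcipher test case above carries a bitmask of the PMDs it should run on, so enabling a case on an additional driver is a single OR of another target bit. A minimal sketch of that pattern, using illustrative names rather than the actual DPDK test macros:

#include <stdint.h>
#include <stdio.h>

#define PMD_OPENSSL (1u << 0)   /* hypothetical target bits */
#define PMD_QAT     (1u << 1)

struct test_case {
    const char *descr;
    uint32_t pmd_mask;          /* which PMDs run this case */
};

int main(void)
{
    struct test_case tc = {
        .descr = "HMAC-MD5 Digest",
        .pmd_mask = PMD_OPENSSL | PMD_QAT,
    };

    if (tc.pmd_mask & PMD_QAT)  /* membership is a bitwise AND */
        printf("running %s on QAT\n", tc.descr);
    return 0;
}
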
diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c
index 89a67952..2a313497 100644
--- a/app/test/test_cryptodev_perf.c
+++ b/app/test/test_cryptodev_perf.c
@@ -2475,6 +2475,11 @@ static uint8_t aes_key[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
+static uint8_t aes_gcm_aad[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
static uint8_t aes_iv[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
@@ -2687,7 +2692,7 @@ test_perf_create_openssl_session(uint8_t dev_id, enum chain_mode chain,
#define AES_BLOCK_SIZE 16
#define AES_CIPHER_IV_LENGTH 16
-
+#define AES_GCM_AAD_LENGTH 16
#define TRIPLE_DES_BLOCK_SIZE 8
#define TRIPLE_DES_CIPHER_IV_LENGTH 8
@@ -2722,8 +2727,6 @@ test_perf_set_crypto_op_aes(struct rte_crypto_op *op, struct rte_mbuf *m,
op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
AES_CIPHER_IV_LENGTH + data_len);
op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
/* Cipher Parameters */
op->sym->cipher.iv.data = rte_pktmbuf_mtod(m, uint8_t *);
@@ -2760,8 +2763,8 @@ test_perf_set_crypto_op_aes_gcm(struct rte_crypto_op *op, struct rte_mbuf *m,
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = aes_iv;
- op->sym->auth.aad.length = AES_CIPHER_IV_LENGTH;
+ op->sym->auth.aad.data = aes_gcm_aad;
+ op->sym->auth.aad.length = AES_GCM_AAD_LENGTH;
/* Cipher Parameters */
op->sym->cipher.iv.data = aes_iv;
@@ -2893,8 +2896,6 @@ test_perf_set_crypto_op_3des(struct rte_crypto_op *op, struct rte_mbuf *m,
op->sym->auth.digest.phys_addr =
rte_pktmbuf_mtophys_offset(m, data_len);
op->sym->auth.digest.length = digest_len;
- op->sym->auth.aad.data = triple_des_iv;
- op->sym->auth.aad.length = TRIPLE_DES_CIPHER_IV_LENGTH;
/* Cipher Parameters */
op->sym->cipher.iv.data = triple_des_iv;
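
The perf-test fix above separates AES-GCM's additional authenticated data (AAD) from the cipher IV, which the old code reused. A compilable sketch of the corrected op setup, with simplified stand-ins for the rte_crypto fields:

#include <stddef.h>
#include <stdint.h>

#define AES_CIPHER_IV_LENGTH 16
#define AES_GCM_AAD_LENGTH   16

static uint8_t aes_iv[AES_CIPHER_IV_LENGTH];    /* cipher IV */
static uint8_t aes_gcm_aad[AES_GCM_AAD_LENGTH]; /* AAD, its own buffer */

struct sym_op {                 /* stand-in for rte_crypto_sym_op */
    struct { uint8_t *data; size_t length; } aad;
    struct { uint8_t *data; size_t length; } iv;
};

static void set_gcm_op(struct sym_op *op)
{
    op->aad.data = aes_gcm_aad;          /* previously aes_iv */
    op->aad.length = AES_GCM_AAD_LENGTH; /* previously the IV length */
    op->iv.data = aes_iv;
    op->iv.length = AES_CIPHER_IV_LENGTH;
}

int main(void)
{
    struct sym_op op;

    set_gcm_op(&op);
    return 0;
}
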
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 52d2d052..2bbf6b5e 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -221,6 +221,10 @@ static struct rte_eth_txconf tx_conf_default = {
};
+static void free_virtualpmd_tx_queue(void);
+
+
+
static int
configure_ethdev(uint8_t port_id, uint8_t start, uint8_t en_isr)
{
@@ -684,6 +688,7 @@ static int
remove_slaves_and_stop_bonded_device(void)
{
/* Clean up and remove slaves from bonded device */
+ free_virtualpmd_tx_queue();
while (test_params->bonded_slave_count > 0)
TEST_ASSERT_SUCCESS(test_remove_slave_from_bonded_device(),
"test_remove_slave_from_bonded_device failed");
@@ -1617,9 +1622,6 @@ test_roundrobin_rx_burst_on_single_slave(void)
/* free mbufs */
for (i = 0; i < MAX_PKT_BURST; i++) {
- if (gen_pkt_burst[i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[i]);
-
if (rx_pkt_burst[i] != NULL)
rte_pktmbuf_free(rx_pkt_burst[i]);
}
@@ -1966,12 +1968,6 @@ test_roundrobin_verify_slave_link_status_change_behaviour(void)
for (i = 0; i < MAX_PKT_BURST; i++) {
if (rx_pkt_burst[i] != NULL)
rte_pktmbuf_free(rx_pkt_burst[i]);
-
- if (gen_pkt_burst[1][i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[1][i]);
-
- if (gen_pkt_burst[3][i] != NULL)
- rte_pktmbuf_free(gen_pkt_burst[1][i]);
}
/* Clean up and remove slaves from bonded device */
@@ -2410,7 +2406,7 @@ test_activebackup_verify_slave_link_status_change_failover(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count, primary_port;
+ int i, burst_size, slave_count, primary_port;
burst_size = 21;
@@ -2543,16 +2539,6 @@ test_activebackup_verify_slave_link_status_change_failover(void)
"(%d) port_stats.opackets not as expected",
test_params->slave_port_ids[3]);
- /* free mbufs */
- for (i = 0; i < TEST_ACTIVE_BACKUP_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -2785,7 +2771,7 @@ balance_l23_tx_burst(uint8_t vlan_enabled, uint8_t ipv4,
static int
test_balance_l23_tx_burst_ipv4_toggle_ip_addr(void)
{
- return balance_l23_tx_burst(0, 1, 1, 0);
+ return balance_l23_tx_burst(0, 1, 0, 1);
}
static int
@@ -3314,7 +3300,7 @@ test_balance_verify_slave_link_status_change_behaviour(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count;
+ int i, burst_size, slave_count;
memset(pkt_burst, 0, sizeof(pkt_burst));
@@ -3452,16 +3438,6 @@ test_balance_verify_slave_link_status_change_behaviour(void)
test_params->bonded_port_id, (int)port_stats.ipackets,
burst_size * 3);
- /* free mbufs allocate for rx testing */
- for (i = 0; i < TEST_BALANCE_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -3883,7 +3859,7 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count;
+ int i, burst_size, slave_count;
memset(pkt_burst, 0, sizeof(pkt_burst));
@@ -3980,16 +3956,6 @@ test_broadcast_verify_slave_link_status_change_behaviour(void)
"(%d) port_stats.ipackets not as expected\n",
test_params->bonded_port_id);
- /* free mbufs allocate for rx testing */
- for (i = 0; i < BROADCAST_LINK_STATUS_NUM_OF_SLAVES; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
@@ -4405,7 +4371,7 @@ test_tlb_verify_slave_link_status_change_failover(void)
uint8_t slaves[RTE_MAX_ETHPORTS];
- int i, j, burst_size, slave_count, primary_port;
+ int i, burst_size, slave_count, primary_port;
burst_size = 21;
@@ -4523,18 +4489,6 @@ test_tlb_verify_slave_link_status_change_failover(void)
"(%d) port_stats.ipackets not as expected\n",
test_params->bonded_port_id);
- /* free mbufs */
-
- for (i = 0; i < TEST_ADAPTIVE_TRANSMIT_LOAD_BALANCING_RX_BURST_SLAVE_COUNT; i++) {
- for (j = 0; j < MAX_PKT_BURST; j++) {
- if (pkt_burst[i][j] != NULL) {
- rte_pktmbuf_free(pkt_burst[i][j]);
- pkt_burst[i][j] = NULL;
- }
- }
- }
-
-
/* Clean up and remove slaves from bonded device */
return remove_slaves_and_stop_bonded_device();
}
diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c
index dc339735..292ea3d0 100644
--- a/app/test/test_link_bonding_mode4.c
+++ b/app/test/test_link_bonding_mode4.c
@@ -73,11 +73,11 @@
#define MAX_PKT_BURST (32)
#define DEF_PKT_BURST (16)
-#define BONDED_DEV_NAME ("unit_test_mode4_bond_dev")
+#define BONDED_DEV_NAME ("ut_mode4_bond_dev")
-#define SLAVE_DEV_NAME_FMT ("unit_test_mode4_slave_%d")
-#define SLAVE_RX_QUEUE_FMT ("unit_test_mode4_slave_%d_rx")
-#define SLAVE_TX_QUEUE_FMT ("unit_test_mode4_slave_%d_tx")
+#define SLAVE_DEV_NAME_FMT ("ut_mode4_slave_%d")
+#define SLAVE_RX_QUEUE_FMT ("ut_mode4_slave_%d_rx")
+#define SLAVE_TX_QUEUE_FMT ("ut_mode4_slave_%d_tx")
#define INVALID_SOCKET_ID (-1)
#define INVALID_PORT_ID (0xFF)
diff --git a/app/test/test_link_bonding_rssconf.c b/app/test/test_link_bonding_rssconf.c
index e11da9bf..23052a2c 100644
--- a/app/test/test_link_bonding_rssconf.c
+++ b/app/test/test_link_bonding_rssconf.c
@@ -61,7 +61,7 @@
#define RXTX_RING_SIZE 1024
#define RXTX_QUEUE_COUNT 4
-#define BONDED_DEV_NAME ("rssconf_bond_dev")
+#define BONDED_DEV_NAME ("net_bonding_rss")
#define SLAVE_DEV_NAME_FMT ("rssconf_slave%d")
#define SLAVE_RXTX_QUEUE_FMT ("rssconf_slave%d_q%d")
diff --git a/doc/guides/cryptodevs/aesni_mb.rst b/doc/guides/cryptodevs/aesni_mb.rst
index e812e957..10e5473d 100644
--- a/doc/guides/cryptodevs/aesni_mb.rst
+++ b/doc/guides/cryptodevs/aesni_mb.rst
@@ -65,7 +65,6 @@ Limitations
* Hash only is not supported.
* Cipher only is not supported.
* Only in-place is currently supported (destination address is the same as source address).
-* Only supports session-oriented API implementation (session-less APIs are not supported).
* Not performance tuned.
Installation
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index a7b28eb9..2173819e 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -74,7 +74,6 @@ Limitations
-----------
* Chained mbufs are not supported.
-* Hash only is not supported except SNOW 3G UIA2 and KASUMI F9.
* Cipher only is not supported except SNOW 3G UEA2, KASUMI F8 and 3DES.
* Only supports the session-oriented API implementation (session-less APIs are not supported).
* SNOW 3G (UEA2) and KASUMI (F8) supported only if cipher length, cipher offset fields are byte-aligned.
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index 85972b17..45cdbf63 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -829,3 +829,87 @@ Fixes in 16.11 LTS Release
* vhost: fix max queues
* vhost: fix multiple queue not enabled for old kernels
* vhost: fix use after free
+
+16.11.3
+~~~~~~~
+
+* contigmem: do not zero pages during each mmap
+* contigmem: free allocated memory on error
+* crypto/aesni_mb: fix HMAC supported key sizes
+* cryptodev: fix device stop function
+* crypto/openssl: fix HMAC supported key sizes
+* crypto/qat: fix HMAC supported key sizes
+* crypto/qat: fix NULL authentication hang
+* crypto/qat: fix SHA384-HMAC block size
+* doc: remove incorrect limitation on AESNI-MB PMD
+* doc: remove incorrect limitation on QAT PMD
+* eal: fix config file path when checking process
+* examples/l2fwd-crypto: fix application help
+* examples/l2fwd-crypto: fix option parsing
+* examples/l2fwd-crypto: fix padding
+* examples/l3fwd: fix IPv6 packet type parse
+* examples/qos_sched: fix build for less lcores
+* ip_frag: free mbufs on reassembly table destroy
+* kni: fix build with gcc 7.1
+* lpm: fix index of tbl8
+* mbuf: fix debug checks for headroom and tailroom
+* mbuf: fix doxygen comment of bulk alloc
+* mbuf: fix VXLAN port in comment
+* mem: fix malloc element resize with padding
+* net/bnxt: check invalid L2 filter id
+* net/bnxt: enable default VNIC allocation
+* net/bnxt: fix autoneg on 10GBase-T links
+* net/bnxt: fix get link config
+* net/bnxt: fix reporting of link status
+* net/bnxt: fix set link config
+* net/bnxt: fix set link config
+* net/bnxt: fix vnic cleanup
+* net/bnxt: free filter before reusing it
+* net/bonding: change link status check to no-wait
+* net/bonding: fix number of bonding Tx/Rx queues
+* net/bonding: fix when NTT flag updated
+* net/cxgbe: fix port statistics
+* net/e1000: fix LSC interrupt
+* net/ena: fix cleanup of the Tx bufs
+* net/enic: fix build with gcc 7.1
+* net/enic: fix crash when freeing 0 packet to mempool
+* net/fm10k: initialize link status in device start
+* net/i40e: add return value checks
+* net/i40e/base: fix Tx error stats on VF
+* net/i40e: exclude internal packet's byte count
+* net/i40e: fix division by 0
+* net/i40e: fix ethertype filter for new FW
+* net/i40e: fix link down and negotiation
+* net/i40e: fix Rx data segment buffer length
+* net/i40e: fix VF statistics
+* net/igb: fix add/delete of flex filters
+* net/igb: fix checksum valid flags
+* net/igb: fix flex filter length
+* net/ixgbe: fix mirror rule index overflow
+* net/ixgbe: fix Rx/Tx queue interrupt for x550 devices
+* net/mlx4: fix mbuf poisoning in debug code
+* net/mlx4: fix probe failure report
+* net/mlx5: fix build with gcc 7.1
+* net/mlx5: fix completion buffer size
+* net/mlx5: fix exception handling
+* net/mlx5: fix inconsistent link status query
+* net/mlx5: fix redundant free of Tx buffer
+* net/qede: fix chip details print
+* net/virtio: do not claim to support LRO
+* net/virtio: do not falsely claim to do IP checksum
+* net/virtio-user: fix crash when detaching device
+* net/virtio: zero the whole memory zone
+* net/vmxnet3: fix filtering on promiscuous disabling
+* net/vmxnet3: fix receive queue memory leak
+* Revert "ip_frag: free mbufs on reassembly table destroy"
+* test/bonding: fix memory corruptions
+* test/bonding: fix mode 4 names
+* test/bonding: fix namespace of the RSS tests
+* test/bonding: fix parameters of a balance Tx
+* test/crypto: fix overflow
+* test/crypto: fix wrong AAD setting
+* vhost: fix checking of device features
+* vhost: fix guest pages memory leak
+* vhost: fix IP checksum
+* vhost: fix TCP checksum
+* vhost: make page logging atomic
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 3d49e2ae..287c8a50 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
@@ -69,9 +69,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 12,
@@ -90,9 +90,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 14,
@@ -111,9 +111,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
@@ -132,9 +132,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 24,
@@ -153,9 +153,9 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 139fed14..a072e6e3 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 16,
@@ -90,9 +90,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
@@ -132,9 +132,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
@@ -174,9 +174,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
@@ -216,9 +216,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
@@ -258,9 +258,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 8900668d..1c4b184e 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -99,6 +99,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -761,6 +764,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 0fe0b232..f7fcecea 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -76,9 +76,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 20,
@@ -97,9 +97,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 28,
@@ -118,9 +118,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
.block_size = 64,
.key_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 32,
@@ -137,11 +137,11 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
{.auth = {
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
- .block_size = 64,
+ .block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 48,
@@ -160,9 +160,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
.block_size = 128,
.key_size = {
- .min = 128,
+ .min = 1,
.max = 128,
- .increment = 0
+ .increment = 1
},
.digest_size = {
.min = 64,
@@ -181,9 +181,9 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
.block_size = 64,
.key_size = {
- .min = 8,
+ .min = 1,
.max = 64,
- .increment = 8
+ .increment = 1
},
.digest_size = {
.min = 16,
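
All three capability tables above (AESNI-MB, OpenSSL, QAT) widen the HMAC key range the same way: RFC 2104 places no lower bound on key length (short keys are zero-padded, keys longer than the block size are first hashed down), so min drops to 1 and increment to 1. A sketch of how such a {min, max, increment} range can be checked, with a simplified stand-in for the DPDK capability struct:

#include <stdint.h>
#include <stdio.h>

struct param_range {            /* stand-in for rte_crypto_param_range */
    uint16_t min;
    uint16_t max;
    uint16_t increment;
};

static int key_size_ok(const struct param_range *r, uint16_t len)
{
    if (len < r->min || len > r->max)
        return 0;
    if (r->increment == 0)      /* increment 0: only min is valid */
        return len == r->min;
    return (len - r->min) % r->increment == 0;
}

int main(void)
{
    struct param_range before = { .min = 64, .max = 64, .increment = 0 };
    struct param_range after  = { .min = 1,  .max = 64, .increment = 1 };

    printf("20-byte key, old range: %d\n", key_size_ok(&before, 20)); /* 0 */
    printf("20-byte key, new range: %d\n", key_size_ok(&after, 20));  /* 1 */
    return 0;
}
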
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 07e71241..93910d81 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -194,6 +194,9 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
+ if (filter->fw_l2_filter_id == UINT64_MAX)
+ return 0;
+
HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
@@ -216,6 +219,9 @@ int bnxt_hwrm_set_filter(struct bnxt *bp,
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ if (filter->fw_l2_filter_id != UINT64_MAX)
+ bnxt_hwrm_clear_filter(bp, filter);
+
HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
req.flags = rte_cpu_to_le_32(filter->flags);
@@ -476,6 +482,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
struct hwrm_port_phy_cfg_input req = {0};
struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint32_t enables = 0;
+ uint32_t link_speed_mask =
+ HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
@@ -487,14 +495,20 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
* any auto mode, even "none".
*/
if (!conf->link_speed) {
- req.auto_mode |= conf->auto_mode;
- enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
- req.auto_link_speed_mask = conf->auto_link_speed_mask;
- enables |=
- HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
- req.auto_link_speed = bp->link_info.auto_link_speed;
- enables |=
+ req.auto_mode = conf->auto_mode;
+ enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
+ if (conf->auto_mode ==
+ HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
+ req.auto_link_speed_mask =
+ conf->auto_link_speed_mask;
+ enables |= link_speed_mask;
+ }
+ if (bp->link_info.auto_link_speed) {
+ req.auto_link_speed =
+ bp->link_info.auto_link_speed;
+ enables |=
HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
+ }
}
req.auto_duplex = conf->duplex;
enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
@@ -534,13 +548,10 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
HWRM_CHECK_RESULT;
link_info->phy_link_status = resp->link;
- if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
- link_info->link_up = 1;
- link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
- } else {
- link_info->link_up = 0;
- link_info->link_speed = 0;
- }
+ link_info->link_up =
+ (link_info->phy_link_status ==
+ HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
+ link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
link_info->duplex = resp->duplex;
link_info->pause = resp->pause;
link_info->auto_pause = resp->auto_pause;
@@ -829,11 +840,14 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_PREP(req, VNIC_ALLOC, -1, resp);
+ if (vnic->func_default)
+ req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
HWRM_CHECK_RESULT;
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -843,6 +857,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_cfg_input req = {.req_type = 0 };
struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_CFG, -1, resp);
/* Only RSS support for now TBD: COS & LB */
@@ -885,6 +904,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
HWRM_CHECK_RESULT;
vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
+ RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
return rc;
}
@@ -896,6 +916,12 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
bp->hwrm_cmd_resp_addr;
+ if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
+ RTE_LOG(DEBUG, PMD,
+ "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
@@ -915,8 +941,10 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_input req = {.req_type = 0 };
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
- if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
+ }
HWRM_PREP(req, VNIC_FREE, -1, resp);
@@ -1335,12 +1363,16 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
return 0;
}
-static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
+static uint16_t
+bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
uint16_t ret = 0;
- if (link_speed == ETH_LINK_SPEED_AUTONEG)
+ if (link_speed == ETH_LINK_SPEED_AUTONEG) {
+ if (bp->link_info.support_speeds)
+ return bp->link_info.support_speeds;
link_speed = BNXT_SUPPORTED_SPEEDS;
+ }
if (link_speed & ETH_LINK_SPEED_100M)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
@@ -1432,16 +1464,16 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
"Get link config failed with rc %d\n", rc);
goto exit;
}
- if (link_info->link_up)
+ if (link_info->link_speed)
link->link_speed =
bnxt_parse_hw_link_speed(link_info->link_speed);
else
- link->link_speed = ETH_LINK_SPEED_10M;
+ link->link_speed = ETH_SPEED_NUM_NONE;
link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
link->link_status = link_info->link_up;
link->link_autoneg = link_info->auto_mode ==
HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
- ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
+ ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
return rc;
}
@@ -1474,7 +1506,8 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
link_req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
link_req.auto_link_speed_mask =
- bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
+ bnxt_parse_eth_link_speed_mask(bp,
+ dev_conf->link_speeds);
} else {
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
link_req.link_speed = speed;
@@ -1491,7 +1524,6 @@ port_phy_cfg:
"Set link config failed with rc %d\n", rc);
}
- rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
return rc;
}
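
Several of the bnxt changes above follow one pattern: UINT64_MAX marks a filter as not allocated, the free path returns early on that sentinel, and the alloc path releases any live filter before reusing its slot. A minimal sketch, with hypothetical stand-ins for the HWRM firmware calls:

#include <stdint.h>

#define INVALID_FILTER_ID UINT64_MAX    /* sentinel: not allocated */

struct filter { uint64_t fw_id; };

/* hypothetical stand-ins for the HWRM firmware calls */
static void fw_free(uint64_t id) { (void)id; }
static uint64_t fw_alloc(void) { return 42; }

static void filter_clear(struct filter *f)
{
    if (f->fw_id == INVALID_FILTER_ID)
        return;                 /* never allocated: nothing to free */
    fw_free(f->fw_id);
    f->fw_id = INVALID_FILTER_ID;
}

static void filter_set(struct filter *f)
{
    if (f->fw_id != INVALID_FILTER_ID)
        filter_clear(f);        /* release before reuse: no leak */
    f->fw_id = fw_alloc();
}

int main(void)
{
    struct filter f = { INVALID_FILTER_ID };

    filter_set(&f);     /* allocates */
    filter_set(&f);     /* frees the old id first */
    filter_clear(&f);   /* frees */
    filter_clear(&f);   /* no-op on the sentinel */
    return 0;
}
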
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2f7ae70c..b4a1e727 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -435,7 +435,7 @@ periodic_machine(struct bond_dev_private *internals, uint8_t slave_id)
* In other case (was fast and now it is slow) just switch
* timeout to slow without forcing send of LACP (because standard
* say so)*/
- if (!is_partner_fast)
+ if (is_partner_fast)
SM_FLAG_SET(port, NTT);
} else
return; /* Nothing changed */
@@ -758,7 +758,7 @@ bond_mode_8023ad_periodic_cb(void *arg)
uint16_t key;
slave_id = internals->active_slaves[i];
- rte_eth_link_get(slave_id, &link_info);
+ rte_eth_link_get_nowait(slave_id, &link_info);
rte_eth_macaddr_get(slave_id, &slave_addr);
if (link_info.link_status != 0) {
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index aa71e3f8..7811a5ac 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -643,7 +643,7 @@ bandwidth_left(uint8_t port_id, uint64_t load, uint8_t update_idx,
{
struct rte_eth_link link_status;
- rte_eth_link_get(port_id, &link_status);
+ rte_eth_link_get_nowait(port_id, &link_status);
uint64_t link_bwg = link_status.link_speed * 1000000ULL / 8;
if (link_bwg == 0)
return;
@@ -1667,6 +1667,8 @@ static void
bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ uint16_t max_nb_rx_queues = UINT16_MAX;
+ uint16_t max_nb_tx_queues = UINT16_MAX;
dev_info->max_mac_addrs = 1;
@@ -1674,8 +1676,29 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
? internals->candidate_max_rx_pktlen
: ETHER_MAX_JUMBO_FRAME_LEN;
- dev_info->max_rx_queues = (uint16_t)128;
- dev_info->max_tx_queues = (uint16_t)512;
+ if (internals->slave_count > 0) {
+ /* Max number of tx/rx queues that the bonded device can
+ * support is the minimum values of the bonded slaves, as
+ * all slaves must be capable of supporting the same number
+ * of tx/rx queues.
+ */
+ struct rte_eth_dev_info slave_info;
+ uint8_t idx;
+
+ for (idx = 0; idx < internals->slave_count; idx++) {
+ rte_eth_dev_info_get(internals->slaves[idx].port_id,
+ &slave_info);
+
+ if (slave_info.max_rx_queues < max_nb_rx_queues)
+ max_nb_rx_queues = slave_info.max_rx_queues;
+
+ if (slave_info.max_tx_queues < max_nb_tx_queues)
+ max_nb_tx_queues = slave_info.max_tx_queues;
+ }
+ }
+
+ dev_info->max_rx_queues = max_nb_rx_queues;
+ dev_info->max_tx_queues = max_nb_tx_queues;
dev_info->min_rx_bufsize = 0;
dev_info->pci_dev = NULL;
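
The bonding change above reports queue limits as the minimum across all slaves, since every slave must support the bonded device's queue count; with no slaves attached the limit stays UINT16_MAX. A sketch of that reduction, with a stand-in for rte_eth_dev_info:

#include <stdint.h>

struct dev_info { uint16_t max_rx_queues; uint16_t max_tx_queues; };

static void bond_queue_limits(const struct dev_info *slaves, int n,
                              uint16_t *max_rx, uint16_t *max_tx)
{
    uint16_t rx = UINT16_MAX, tx = UINT16_MAX;
    int i;

    for (i = 0; i < n; i++) {   /* take the most constrained slave */
        if (slaves[i].max_rx_queues < rx)
            rx = slaves[i].max_rx_queues;
        if (slaves[i].max_tx_queues < tx)
            tx = slaves[i].max_tx_queues;
    }
    *max_rx = rx;
    *max_tx = tx;
}

int main(void)
{
    struct dev_info slaves[2] = { { 8, 16 }, { 4, 32 } };
    uint16_t rx, tx;

    bond_queue_limits(slaves, 2, &rx, &tx); /* rx = 4, tx = 16 */
    return 0;
}
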
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 9dca8da1..19afdac9 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -2136,6 +2136,7 @@ unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -2168,6 +2169,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATTX) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCTX)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
+
p->rx_octets = GET_STAT(RX_PORT_BYTES);
p->rx_frames = GET_STAT(RX_PORT_FRAMES);
p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
@@ -2195,6 +2205,16 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATRX) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCRX)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index 9057e409..9c132dcd 100644
--- a/drivers/net/cxgbe/base/t4_regs.h
+++ b/drivers/net/cxgbe/base/t4_regs.h
@@ -553,6 +553,24 @@
#define V_VF(x) ((x) << S_VF)
#define G_VF(x) (((x) >> S_VF) & M_VF)
+#define A_MPS_STAT_CTL 0x9600
+
+#define S_COUNTPAUSEMCRX 5
+#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX)
+#define F_COUNTPAUSEMCRX V_COUNTPAUSEMCRX(1U)
+
+#define S_COUNTPAUSESTATRX 4
+#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX)
+#define F_COUNTPAUSESTATRX V_COUNTPAUSESTATRX(1U)
+
+#define S_COUNTPAUSEMCTX 3
+#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX)
+#define F_COUNTPAUSEMCTX V_COUNTPAUSEMCTX(1U)
+
+#define S_COUNTPAUSESTATTX 2
+#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
+#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)
+
#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
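
The new registers follow the S_/V_/F_ macro convention used throughout t4_regs.h: S_X is a field's bit offset, V_X(v) shifts a value into position, and F_X is the single-bit flag form V_X(1U). Testing a status bit is then a plain mask check, as t4_get_port_stats() does with F_COUNTPAUSESTATTX. A self-contained sketch:

#include <stdint.h>
#include <stdio.h>

#define S_COUNTPAUSESTATTX 2
#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)

int main(void)
{
    uint32_t stat_ctl = 0x4;    /* pretend register read: bit 2 set */

    if (stat_ctl & F_COUNTPAUSESTATTX)
        printf("pause frames are counted in TX stats\n");
    return 0;
}
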
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index b7f28ebb..06437c1b 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -654,8 +654,6 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
cxgbe_stats_get(pi, &ps);
/* RX Stats */
- eth_stats->ipackets = ps.rx_frames;
- eth_stats->ibytes = ps.rx_octets;
eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
ps.rx_ovflow2 + ps.rx_ovflow3 +
ps.rx_trunc0 + ps.rx_trunc1 +
@@ -675,6 +673,8 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
eth_stats->q_ipackets[i] = rxq->stats.pkts;
eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
}
for (i = 0; i < pi->n_tx_qsets; i++) {
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 6c25c8da..d648789c 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -82,7 +82,7 @@
#define E1000_MAX_FLEX_FILTER_DWDS \
(E1000_MAX_FLEX_FILTER_LEN / sizeof(uint32_t))
#define E1000_FLEX_FILTERS_MASK_SIZE \
- (E1000_MAX_FLEX_FILTER_DWDS / 4)
+ (E1000_MAX_FLEX_FILTER_DWDS / 2)
#define E1000_FHFT_QUEUEING_LEN 0x0000007F
#define E1000_FHFT_QUEUEING_QUEUE 0x00000700
#define E1000_FHFT_QUEUEING_PRIO 0x00070000
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index be2600d4..9cf619fa 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -129,7 +129,7 @@ static int eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
@@ -1381,7 +1381,9 @@ eth_igb_start(struct rte_eth_dev *dev)
if (rte_intr_allow_others(intr_handle)) {
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
- eth_igb_lsc_interrupt_setup(dev);
+ eth_igb_lsc_interrupt_setup(dev, TRUE);
+ else
+ eth_igb_lsc_interrupt_setup(dev, FALSE);
} else {
rte_intr_callback_unregister(intr_handle,
eth_igb_interrupt_handler,
@@ -2543,18 +2545,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
*
* @param dev
* Pointer to struct rte_eth_dev.
+ * @param on
+ * Enable or Disable
*
* @return
* - On success, zero.
* - On failure, a negative value.
*/
static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
- intr->mask |= E1000_ICR_LSC;
+ if (on)
+ intr->mask |= E1000_ICR_LSC;
+ else
+ intr->mask &= ~E1000_ICR_LSC;
return 0;
}
@@ -3736,10 +3743,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
}
wufc = E1000_READ_REG(hw, E1000_WUFC);
- if (flex_filter->index < E1000_MAX_FHFT)
- reg_off = E1000_FHFT(flex_filter->index);
- else
- reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
if (add) {
if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
@@ -3769,6 +3772,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
+ if (flex_filter->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(flex_filter->index);
+ else
+ reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
+
E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
(E1000_WUFC_FLX0 << flex_filter->index));
queueing = filter->len |
@@ -3797,6 +3805,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
return -ENOENT;
}
+ if (it->index < E1000_MAX_FHFT)
+ reg_off = E1000_FHFT(it->index);
+ else
+ reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
E1000_WRITE_REG(hw, E1000_WUFC, wufc &
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index dbd37acc..556d4605 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -2179,9 +2179,11 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
/* Enable both L3/L4 rx checksum offload */
if (dev->data->dev_conf.rxmode.hw_ip_checksum)
- rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+ E1000_RXCSUM_CRCOFL);
else
- rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+ rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
+ E1000_RXCSUM_CRCOFL);
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
/* Setup the Receive Control Register. */
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 1fc3654e..6efe0c3c 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -677,11 +677,10 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
static void ena_tx_queue_release_bufs(struct ena_ring *ring)
{
- unsigned int ring_mask = ring->ring_size - 1;
+ unsigned int i;
- while (ring->next_to_clean != ring->next_to_use) {
- struct ena_tx_buffer *tx_buf =
- &ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+ for (i = 0; i < ring->ring_size; ++i) {
+ struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
if (tx_buf->mbuf)
rte_pktmbuf_free(tx_buf->mbuf);
@@ -1683,6 +1682,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Free whole mbuf chain */
mbuf = tx_info->mbuf;
rte_pktmbuf_free(mbuf);
+ tx_info->mbuf = NULL;
/* Put back descriptor to the ring for reuse */
tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
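
The ena fix above scans the whole Tx ring rather than the next_to_clean..next_to_use window, so buffers outside that window are not leaked, and the transmit cleanup now clears tx_info->mbuf after freeing so the release pass cannot free it twice. A sketch of the idea, with free() standing in for rte_pktmbuf_free():

#include <stdlib.h>

struct tx_buffer { void *mbuf; };   /* stand-in for ena_tx_buffer */

static void tx_queue_release_bufs(struct tx_buffer *ring, unsigned int size)
{
    unsigned int i;

    for (i = 0; i < size; ++i) {    /* every slot, not just a window */
        if (ring[i].mbuf) {
            free(ring[i].mbuf);
            ring[i].mbuf = NULL;    /* prevent a later double free */
        }
    }
}

int main(void)
{
    struct tx_buffer ring[4] = {
        { malloc(16) }, { NULL }, { malloc(16) }, { NULL }
    };

    tx_queue_release_bufs(ring, 4);
    return 0;
}
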
diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c
index 84e4840a..1cd031ac 100644
--- a/drivers/net/enic/base/vnic_dev.c
+++ b/drivers/net/enic/base/vnic_dev.c
@@ -645,7 +645,7 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
- u64 a0, a1 = 0;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
int err, i;
@@ -1021,7 +1021,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_v2 *data)
{
- u64 a0, a1;
+ u64 a0 = 0, a1 = 0;
int wait = 1000;
dma_addr_t tlv_pa;
int ret = -EINVAL;
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 912ea157..1934f8ba 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -426,7 +426,8 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
tail_idx = enic_ring_incr(desc_count, tail_idx);
}
- rte_mempool_put_bulk(pool, (void **)free, nb_free);
+ if (nb_free > 0)
+ rte_mempool_put_bulk(pool, (void **)free, nb_free);
wq->tail_idx = tail_idx;
wq->ring.desc_avail += nb_to_free;
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 32b0ea93..d04efdc6 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -83,6 +83,7 @@ static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
+static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -1164,6 +1165,8 @@ fm10k_dev_start(struct rte_eth_dev *dev)
if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
+ fm10k_link_update(dev, 0);
+
return 0;
}
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index fd0a7230..caa2e1eb 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -2805,7 +2805,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4e4cd16a..65e10f3b 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1591,11 +1591,15 @@ i40e_parse_link_speeds(uint16_t link_speeds)
static int
i40e_phy_conf_link(struct i40e_hw *hw,
uint8_t abilities,
- uint8_t force_speed)
+ uint8_t force_speed,
+ bool is_up)
{
enum i40e_status_code status;
struct i40e_aq_get_phy_abilities_resp phy_ab;
struct i40e_aq_set_phy_config phy_conf;
+ enum i40e_aq_phy_type cnt;
+ uint32_t phy_type_mask = 0;
+
const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
I40E_AQ_PHY_FLAG_PAUSE_RX |
@@ -1613,6 +1617,10 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (status)
return ret;
+ /* If link already up, no need to set up again */
+ if (is_up && phy_ab.phy_type != 0)
+ return I40E_SUCCESS;
+
memset(&phy_conf, 0, sizeof(phy_conf));
/* bits 0-2 use the values from get_phy_abilities_resp */
@@ -1623,13 +1631,21 @@ i40e_phy_conf_link(struct i40e_hw *hw,
if (abilities & I40E_AQ_PHY_AN_ENABLED)
phy_conf.link_speed = advt;
else
- phy_conf.link_speed = force_speed;
+ phy_conf.link_speed = is_up ? force_speed : phy_ab.link_speed;
phy_conf.abilities = abilities;
+
+
+ /* To enable link, phy_type mask needs to include each type */
+ for (cnt = I40E_PHY_TYPE_SGMII; cnt < I40E_PHY_TYPE_MAX; cnt++)
+ phy_type_mask |= 1 << cnt;
+
/* use get_phy_abilities_resp value for the rest */
- phy_conf.phy_type = phy_ab.phy_type;
- phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.phy_type = is_up ? cpu_to_le32(phy_type_mask) : 0;
+ phy_conf.phy_type_ext = is_up ? (I40E_AQ_PHY_TYPE_EXT_25G_KR |
+ I40E_AQ_PHY_TYPE_EXT_25G_CR | I40E_AQ_PHY_TYPE_EXT_25G_SR |
+ I40E_AQ_PHY_TYPE_EXT_25G_LR) : 0;
phy_conf.fec_config = phy_ab.mod_type_ext;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
@@ -1661,13 +1677,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
- /* Skip changing speed on 40G interfaces, FW does not support */
- if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) {
- speed = I40E_LINK_SPEED_UNKNOWN;
- abilities |= I40E_AQ_PHY_AN_ENABLED;
- }
-
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, true);
}
static int
@@ -1992,7 +2002,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- return i40e_phy_conf_link(hw, abilities, speed);
+ return i40e_phy_conf_link(hw, abilities, speed, false);
}
int
@@ -2096,6 +2106,10 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPRCH(idx), I40E_GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+ /* exclude CRC bytes */
+ nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
+ nes->rx_broadcast) * ETHER_CRC_LEN;
+
i40e_stat_update_32(hw, I40E_GLV_RDPC(idx), vsi->offset_loaded,
&oes->rx_discards, &nes->rx_discards);
/* GLV_REPC not supported */
@@ -2115,6 +2129,9 @@ i40e_update_vsi_stats(struct i40e_vsi *vsi)
i40e_stat_update_48(hw, I40E_GLV_BPTCH(idx), I40E_GLV_BPTCL(idx),
vsi->offset_loaded, &oes->tx_broadcast,
&nes->tx_broadcast);
+ /* exclude CRC bytes */
+ nes->tx_bytes -= (nes->tx_unicast + nes->tx_multicast +
+ nes->tx_broadcast) * ETHER_CRC_LEN;
/* GLV_TDPC not supported */
i40e_stat_update_32(hw, I40E_GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
@@ -2146,6 +2163,19 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
struct i40e_hw_port_stats *ns = &pf->stats; /* new stats */
struct i40e_hw_port_stats *os = &pf->stats_offset; /* old stats */
+ /* Get rx/tx bytes of internal transfer packets */
+ i40e_stat_update_48(hw, I40E_GLV_GORCH(hw->port),
+ I40E_GLV_GORCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_rx_bytes_offset,
+ &pf->internal_rx_bytes);
+
+ i40e_stat_update_48(hw, I40E_GLV_GOTCH(hw->port),
+ I40E_GLV_GOTCL(hw->port),
+ pf->offset_loaded,
+ &pf->internal_tx_bytes_offset,
+ &pf->internal_tx_bytes);
+
/* Get statistics of struct i40e_eth_stats */
i40e_stat_update_48(hw, I40E_GLPRT_GORCH(hw->port),
I40E_GLPRT_GORCL(hw->port),
@@ -2167,7 +2197,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
* so subtract ETHER_CRC_LEN from the byte counter for each rx packet.
*/
ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast +
- ns->eth.rx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.rx_broadcast) * ETHER_CRC_LEN + pf->internal_rx_bytes;
i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port),
pf->offset_loaded, &os->eth.rx_discards,
@@ -2195,7 +2225,7 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
- ns->eth.tx_broadcast) * ETHER_CRC_LEN;
+ ns->eth.tx_broadcast) * ETHER_CRC_LEN + pf->internal_tx_bytes;
/* GLPRT_TEPC not supported */
/* additional port specific stats */
@@ -3992,6 +4022,8 @@ i40e_vsi_config_tc_queue_mapping(struct i40e_vsi *vsi,
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
if (enabled_tcmap & (1 << i))
total_tc++;
+ if (total_tc == 0)
+ total_tc = 1;
vsi->enabled_tc = enabled_tcmap;
/* Number of queues per enabled TC */
@@ -4920,6 +4952,10 @@ i40e_pf_setup(struct i40e_pf *pf)
pf->offset_loaded = FALSE;
memset(&pf->stats, 0, sizeof(struct i40e_hw_port_stats));
memset(&pf->stats_offset, 0, sizeof(struct i40e_hw_port_stats));
+ pf->internal_rx_bytes = 0;
+ pf->internal_tx_bytes = 0;
+ pf->internal_rx_bytes_offset = 0;
+ pf->internal_tx_bytes_offset = 0;
ret = i40e_pf_get_switch_config(pf);
if (ret != I40E_SUCCESS) {
@@ -8317,8 +8353,9 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
*/
/* For both X710 and XL710 */
-#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x10000200
-#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1 0x10000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2 0x20000200
+#define I40E_GL_SWR_PRI_JOIN_MAP_0 0x26CE00
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
@@ -8370,8 +8407,12 @@ i40e_configure_registers(struct i40e_hw *hw)
reg_table[i].val =
I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
else /* For X710/XL710/XXV710 */
- reg_table[i].val =
- I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ if (hw->aq.fw_maj_ver < 6)
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_1;
+ else
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE_2;
}
if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
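
The statistics changes above share one piece of arithmetic: the hardware byte counters include the 4-byte Ethernet CRC for every frame, and on this port also count internally switched traffic, so both are subtracted from the reported totals. A minimal sketch of the correction:

#include <stdint.h>

#define ETHER_CRC_LEN 4

/* raw_bytes: hardware counter; frames: unicast + multicast + broadcast;
 * internal_bytes: traffic that never left the NIC */
static uint64_t reported_rx_bytes(uint64_t raw_bytes, uint64_t frames,
                                  uint64_t internal_bytes)
{
    return raw_bytes - frames * ETHER_CRC_LEN - internal_bytes;
}

int main(void)
{
    /* 10 frames of 64 bytes on the wire, none internal:
     * 640 - 10 * 4 = 600 bytes of packet data */
    return reported_rx_bytes(640, 10, 0) == 600 ? 0 : 1;
}
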
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5f3ecd9a..f2833197 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -431,6 +431,11 @@ struct i40e_pf {
struct i40e_hw_port_stats stats_offset;
struct i40e_hw_port_stats stats;
+ /* internal packet byte count, it should be excluded from the total */
+ uint64_t internal_rx_bytes;
+ uint64_t internal_tx_bytes;
+ uint64_t internal_rx_bytes_offset;
+ uint64_t internal_tx_bytes_offset;
bool offset_loaded;
struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15c..e78610b8 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -294,8 +294,12 @@ i40e_fdir_teardown(struct i40e_pf *pf)
vsi = pf->fdir.fdir_vsi;
if (!vsi)
return;
- i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
- i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
+ err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
+ if (err)
+ PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
i40e_dev_rx_queue_release(pf->fdir.rxq);
pf->fdir.rxq = NULL;
i40e_dev_tx_queue_release(pf->fdir.txq);
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 602e40c9..ba33b2a5 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2324,7 +2324,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
case I40E_FLAG_HEADER_SPLIT_DISABLED:
default:
rxq->rx_hdr_len = 0;
- rxq->rx_buf_len = RTE_ALIGN(buf_size,
+ rxq->rx_buf_len = RTE_ALIGN_FLOOR(buf_size,
(1 << I40E_RXQ_CTX_DBUFF_SHIFT));
rxq->hs_mode = i40e_header_split_none;
break;
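RTE_ALIGN rounds up to the next multiple of the granule, so the rounded rx_buf_len could exceed the actual mbuf data room; RTE_ALIGN_FLOOR rounds down and always fits. A sketch of the difference, assuming the usual 7-bit I40E_RXQ_CTX_DBUFF_SHIFT (128-byte granule); the macro bodies are illustrative stand-ins for rte_common.h:

	#include <stdint.h>

	#define ALIGN_CEIL(v, a)  (((v) + (a) - 1) & ~((uintptr_t)(a) - 1)) /* RTE_ALIGN */
	#define ALIGN_FLOOR(v, a) ((v) & ~((uintptr_t)(a) - 1))             /* RTE_ALIGN_FLOOR */

	/* With buf_size = 2000 and a 128-byte granule:
	 *   ALIGN_CEIL(2000, 128)  == 2048  (overruns the 2000-byte buffer)
	 *   ALIGN_FLOOR(2000, 128) == 1920  (largest multiple that still fits)
	 */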
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index d6686f6c..f994fedc 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5044,6 +5044,9 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_mirror_conf));
@@ -5197,7 +5200,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
tmp |= (msix_vector << (8 * (queue & 0x3)));
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550)) {
if (direction == -1) {
/* other causes */
idx = ((queue & 1) * 8);
@@ -5303,6 +5307,7 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
break;
default:
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 83f9143b..4a03d342 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -3157,6 +3157,13 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
NB_SEGS(rep) = 0x2a;
PORT(rep) = 0x2a;
rep->ol_flags = -1;
+ /*
+ * Clear special flags in mbuf to avoid
+ * crashing while freeing.
+ */
+ rep->ol_flags &=
+ ~(uint64_t)(IND_ATTACHED_MBUF |
+ CTRL_MBUF_FLAG);
#endif
assert(rep->buf_len == seg->buf_len);
/* Reconfigure sge to use rep instead of seg. */
@@ -5618,8 +5625,10 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_dev = list[i];
DEBUG("device opened");
- if (ibv_query_device(attr_ctx, &device_attr))
+ if (ibv_query_device(attr_ctx, &device_attr)) {
+ err = EINVAL;
goto error;
+ }
INFO("%u port(s) detected", device_attr.phys_port_cnt);
for (i = 0; i < device_attr.phys_port_cnt; i++) {
@@ -5645,19 +5654,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
DEBUG("using port %u (%08" PRIx32 ")", port, test);
ctx = ibv_open_device(ibv_dev);
- if (ctx == NULL)
+ if (ctx == NULL) {
+ err = ENODEV;
goto port_error;
+ }
/* Check port status. */
err = ibv_query_port(ctx, port, &port_attr);
if (err) {
ERROR("port query failed: %s", strerror(err));
+ err = ENODEV;
goto port_error;
}
if (port_attr.link_layer != IBV_LINK_LAYER_ETHERNET) {
ERROR("port %d is not configured in Ethernet mode",
port);
+ err = EINVAL;
goto port_error;
}
@@ -5694,6 +5707,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
#ifdef HAVE_EXP_QUERY_DEVICE
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
+ err = ENODEV;
goto port_error;
}
#ifdef RSS_SUPPORT
@@ -5769,6 +5783,7 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
if (priv_get_mac(priv, &mac.addr_bytes)) {
ERROR("cannot get MAC address, is mlx4_en loaded?"
" (errno: %s)", strerror(errno));
+ err = ENODEV;
goto port_error;
}
INFO("port %u MAC address is %02x:%02x:%02x:%02x:%02x:%02x",
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 0aa274e6..ca981a53 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -119,6 +119,7 @@ struct ethtool_link_settings {
#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
#endif
+#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
/**
* Return private structure associated with an Ethernet device.
@@ -754,9 +755,12 @@ static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
struct priv *priv = mlx5_get_priv(dev);
- struct ethtool_link_settings edata = {
- .cmd = ETHTOOL_GLINKSETTINGS,
- };
+ __extension__ struct {
+ struct ethtool_link_settings edata;
+ uint32_t link_mode_data[3 *
+ ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
+ } ecmd;
+
struct ifreq ifr;
struct rte_eth_link dev_link;
uint64_t sc;
@@ -769,15 +773,23 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
memset(&dev_link, 0, sizeof(dev_link));
dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
(ifr.ifr_flags & IFF_RUNNING));
- ifr.ifr_data = (void *)&edata;
+ memset(&ecmd, 0, sizeof(ecmd));
+ ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
+ ifr.ifr_data = (void *)&ecmd;
if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
strerror(errno));
return -1;
}
- dev_link.link_speed = edata.speed;
- sc = edata.link_mode_masks[0] |
- ((uint64_t)edata.link_mode_masks[1] << 32);
+ ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
+ DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
+ strerror(errno));
+ return -1;
+ }
+ dev_link.link_speed = ecmd.edata.speed;
+ sc = ecmd.edata.link_mode_masks[0] |
+ ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
@@ -813,7 +825,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
- dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
+ dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
ETH_LINK_SPEED_FIXED);
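The double ioctl above follows the ETHTOOL_GLINKSETTINGS handshake: the first call passes link_mode_masks_nwords == 0, the kernel answers with the required mask size as a negative word count, and negating that value for a second call makes the kernel fill in speed, duplex and the link-mode bitmaps. A minimal userspace sketch of the same handshake (error handling trimmed; 127 words per mask matches the SCHAR_MAX bound defined above):

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* 'fd' is any AF_INET socket, 'ifname' the netdev name.
	 * Returns link speed in Mb/s, or -1 on failure. */
	static int
	get_link_speed(int fd, const char *ifname)
	{
		__extension__ struct {
			struct ethtool_link_settings edata;
			uint32_t link_mode_data[3 * 127]; /* 3 masks, 127 words each */
		} ecmd;
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
		ifr.ifr_data = (void *)&ecmd;

		memset(&ecmd, 0, sizeof(ecmd));
		ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return -1;
		/* Kernel reports the mask size it wants as a negative count. */
		ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return -1;
		return (int)ecmd.edata.speed;
	}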
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf6826..d4c19e5a 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -142,6 +142,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fallthrough */
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
@@ -731,9 +732,11 @@ priv_fdir_disable(struct priv *priv)
/* Destroy flow director context in each RX queue. */
for (i = 0; (i != priv->rxqs_n); i++) {
- struct rxq_ctrl *rxq_ctrl =
- container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
+ struct rxq_ctrl *rxq_ctrl;
+ if (!(*priv->rxqs)[i])
+ continue;
+ rxq_ctrl = container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq);
if (!rxq_ctrl->fdir_queue)
continue;
priv_fdir_queue_destroy(priv, rxq_ctrl->fdir_queue);
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 118f6d67..aea203b3 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -868,12 +868,16 @@ static inline int
rxq_setup(struct rxq_ctrl *tmpl)
{
struct ibv_cq *ibcq = tmpl->cq;
- struct mlx5_cq *cq = to_mxxx(cq, cq);
+ struct ibv_mlx5_cq_info cq_info;
struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
- if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+ if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+ ERROR("Unable to query CQ info. check your OFED.");
+ return ENOTSUP;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
return EINVAL;
@@ -881,16 +885,16 @@ rxq_setup(struct rxq_ctrl *tmpl)
if (elts == NULL)
return ENOMEM;
tmpl->rxq.rq_db = rwq->rq.db;
- tmpl->rxq.cqe_n = log2above(ibcq->cqe);
+ tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
tmpl->rxq.cq_ci = 0;
tmpl->rxq.rq_ci = 0;
- tmpl->rxq.cq_db = cq->dbrec;
+ tmpl->rxq.cq_db = cq_info.dbrec;
tmpl->rxq.wqes =
(volatile struct mlx5_wqe_data_seg (*)[])
(uintptr_t)rwq->rq.buff;
tmpl->rxq.cqes =
(volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq->active_buf->buf;
+ (uintptr_t)cq_info.buf;
tmpl->rxq.elts = elts;
return 0;
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index c2863671..d0c4dce0 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -115,7 +115,7 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
struct rte_mbuf *elt = (*elts)[elts_tail];
assert(elt != NULL);
- rte_pktmbuf_free(elt);
+ rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
/* Poisoning. */
memset(&(*elts)[elts_tail],
@@ -205,14 +205,18 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
{
struct mlx5_qp *qp = to_mqp(tmpl->qp);
struct ibv_cq *ibcq = tmpl->cq;
- struct mlx5_cq *cq = to_mxxx(cq, cq);
+ struct ibv_mlx5_cq_info cq_info;
- if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) {
+ if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
+ ERROR("Unable to query CQ info. check your OFED.");
+ return ENOTSUP;
+ }
+ if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
"it should be set to %u", RTE_CACHE_LINE_SIZE);
return EINVAL;
}
- tmpl->txq.cqe_n = log2above(ibcq->cqe);
+ tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
tmpl->txq.wqes =
(volatile struct mlx5_wqe64 (*)[])
@@ -220,10 +224,10 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
tmpl->txq.bf_reg = qp->gen_data.bf->reg;
- tmpl->txq.cq_db = cq->dbrec;
+ tmpl->txq.cq_db = cq_info.dbrec;
tmpl->txq.cqes =
(volatile struct mlx5_cqe (*)[])
- (uintptr_t)cq->active_buf->buf;
+ (uintptr_t)cq_info.buf;
tmpl->txq.elts =
(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 23221478..9d782ac7 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -203,9 +203,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
DP_INFO(edev, " DPDK version:%s\n", rte_version());
- DP_INFO(edev, " Chip details : %s%d\n",
+ DP_INFO(edev, " Chip details : %s %c%d\n",
ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 452ad2a5..67ebb1e5 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -423,7 +423,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
}
}
- memset(mz->addr, 0, sizeof(mz->len));
+ memset(mz->addr, 0, mz->len);
vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
@@ -1477,37 +1477,19 @@ virtio_dev_configure(struct rte_eth_dev *dev)
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
- uint64_t req_features;
- int ret;
PMD_INIT_LOG(DEBUG, "configure");
- req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
- if (rxmode->hw_ip_checksum)
- req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
- req_features |=
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO6);
-
- /* if request features changed, reinit the device */
- if (req_features != hw->req_guest_features) {
- ret = virtio_init_device(dev, req_features);
- if (ret < 0)
- return ret;
- }
- if (rxmode->hw_ip_checksum &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ /* Virtio does L4 checksum but not L3! */
+ if (rxmode->hw_ip_checksum) {
PMD_DRV_LOG(NOTICE,
- "rx ip checksum not available on this host");
+ "virtio does not support IP checksum");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
- (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+ if (rxmode->enable_lro) {
PMD_DRV_LOG(NOTICE,
- "lro not available on this host");
+ "virtio does not support Large Receive Offload");
return -ENOTSUP;
}
@@ -1714,8 +1696,8 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
};
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
+ DEV_RX_OFFLOAD_UDP_CKSUM;
+
dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
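With this change a virtio port rejects RX IP checksum and LRO at configure time instead of silently renegotiating features. A hypothetical caller-side view, using the 16.11-era rxmode bitfields, of what the PMD will now accept:

	#include <string.h>
	#include <rte_ethdev.h>

	/* Sketch only: rxmode settings a virtio port accepts after this fix. */
	static void
	init_virtio_conf(struct rte_eth_conf *conf)
	{
		memset(conf, 0, sizeof(*conf));
		conf->rxmode.hw_ip_checksum = 0; /* virtio: L4 checksum only, no L3 */
		conf->rxmode.enable_lro = 0;     /* requesting LRO now returns -ENOTSUP */
	}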
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index f018724f..b1bd6234 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -479,7 +479,6 @@ virtio_user_pmd_remove(const char *name)
virtio_user_dev_uninit(dev);
rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index f123df90..aedac6ca 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -831,7 +831,10 @@ vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
struct vmxnet3_hw *hw = dev->data->dev_private;
uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
- memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
+ else
+ memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 3ded18ee..3056f4ff 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -734,6 +734,12 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
(int)(rcd - (struct Vmxnet3_RxCompDesc *)
rxq->comp_ring.base), rcd->rxdIdx);
rte_pktmbuf_free_seg(rxm);
+ if (rxq->start_seg) {
+ struct rte_mbuf *start = rxq->start_seg;
+
+ rxq->start_seg = NULL;
+ rte_pktmbuf_free(start);
+ }
goto rcd_done;
}
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index 07282317..b40c49c3 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -432,7 +432,8 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ip_hdr;
- unsigned ipdata_offset, pad_len, data_len;
+ uint32_t ipdata_offset, data_len;
+ uint32_t pad_len = 0;
char *padding;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -455,16 +456,32 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
if (cparams->do_hash && cparams->hash_verify)
data_len -= cparams->digest_length;
- pad_len = data_len % cparams->block_size ? cparams->block_size -
- (data_len % cparams->block_size) : 0;
+ if (cparams->do_cipher) {
+ /*
+ * Following algorithms are block cipher algorithms,
+ * and might need padding
+ */
+ switch (cparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ if (data_len % cparams->block_size)
+ pad_len = cparams->block_size -
+ (data_len % cparams->block_size);
+ break;
+ default:
+ pad_len = 0;
+ }
- if (pad_len) {
- padding = rte_pktmbuf_append(m, pad_len);
- if (unlikely(!padding))
- return -1;
+ if (pad_len) {
+ padding = rte_pktmbuf_append(m, pad_len);
+ if (unlikely(!padding))
+ return -1;
- data_len += pad_len;
- memset(padding, 0, pad_len);
+ data_len += pad_len;
+ memset(padding, 0, pad_len);
+ }
}
/* Set crypto operation data parameters */
@@ -869,7 +886,8 @@ l2fwd_crypto_usage(const char *prgname)
" (0 to disable, 10 default, 86400 maximum)\n"
" --cdev_type HW / SW / ANY\n"
- " --chain HASH_CIPHER / CIPHER_HASH\n"
+ " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
+ " HASH_ONLY\n"
" --cipher_algo ALGO\n"
" --cipher_op ENCRYPT / DECRYPT\n"
@@ -1379,7 +1397,7 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
l2fwd_crypto_default_options(options);
- while ((opt = getopt_long(argc, argvopt, "p:q:st:", lgopts,
+ while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
&option_index)) != EOF) {
switch (opt) {
/* long options */
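The padding logic added above applies only to block-cipher modes; stream and counter modes need none. The pad arithmetic, pulled out as a standalone helper (name hypothetical):

	#include <stdint.h>

	/* E.g. AES-CBC: block_size = 16, data_len = 70
	 *   70 % 16 = 6  ->  pad_len = 16 - 6 = 10  ->  padded length 80 */
	static uint32_t
	block_pad_len(uint32_t data_len, uint32_t block_size)
	{
		uint32_t rem = data_len % block_size;

		return rem ? block_size - rem : 0;
	}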
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 9cc44603..46b327e1 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -614,7 +614,7 @@ em_parse_ptype(struct rte_mbuf *m)
packet_type |= RTE_PTYPE_L4_UDP;
} else
packet_type |= RTE_PTYPE_L3_IPV4_EXT;
- } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
+ } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
ipv6_hdr = (struct ipv6_hdr *)l3;
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP;
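The one-character fix above corrects a copy-paste bug: both branches compared against ETHER_TYPE_IPv4, so the IPv6 path was unreachable. The intended dispatch shape, reduced to a standalone sketch with the raw EtherType values:

	#include <stdint.h>

	#define ETHERTYPE_IPV4 0x0800
	#define ETHERTYPE_IPV6 0x86DD

	/* ether_type in host byte order; returns the L3 version or 0. */
	static int
	l3_proto(uint16_t ether_type)
	{
		if (ether_type == ETHERTYPE_IPV4)
			return 4;
		else if (ether_type == ETHERTYPE_IPV6)
			return 6;
		return 0;
	}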
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index c7490c61..8d02e1ad 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -69,8 +69,13 @@ extern "C" {
#define BURST_TX_DRAIN_US 100
#ifndef APP_MAX_LCORE
+#if (RTE_MAX_LCORE > 64)
#define APP_MAX_LCORE 64
+#else
+#define APP_MAX_LCORE RTE_MAX_LCORE
+#endif
#endif
+
#define MAX_DATA_STREAMS (APP_MAX_LCORE/2)
#define MAX_SCHED_SUBPORTS 8
#define MAX_SCHED_PIPES 4096
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
index 54e95d5c..c836a2c1 100644
--- a/lib/librte_cryptodev/rte_cryptodev.c
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -737,8 +737,8 @@ rte_cryptodev_stop(uint8_t dev_id)
return;
}
- dev->data->dev_started = 0;
(*dev->dev_ops->dev_stop)(dev);
+ dev->data->dev_started = 0;
}
int
diff --git a/lib/librte_eal/bsdapp/contigmem/contigmem.c b/lib/librte_eal/bsdapp/contigmem/contigmem.c
index da971deb..e8fb9087 100644
--- a/lib/librte_eal/bsdapp/contigmem/contigmem.c
+++ b/lib/librte_eal/bsdapp/contigmem/contigmem.c
@@ -50,24 +50,37 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/pmap.h>
+#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
+#include <vm/vm_phys.h>
+
+struct contigmem_buffer {
+ void *addr;
+ int refcnt;
+ struct mtx mtx;
+};
+
+struct contigmem_vm_handle {
+ int buffer_index;
+};
static int contigmem_load(void);
static int contigmem_unload(void);
static int contigmem_physaddr(SYSCTL_HANDLER_ARGS);
-static d_mmap_t contigmem_mmap;
static d_mmap_single_t contigmem_mmap_single;
static d_open_t contigmem_open;
+static d_close_t contigmem_close;
static int contigmem_num_buffers = RTE_CONTIGMEM_DEFAULT_NUM_BUFS;
static int64_t contigmem_buffer_size = RTE_CONTIGMEM_DEFAULT_BUF_SIZE;
static eventhandler_tag contigmem_eh_tag;
-static void *contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
+static struct contigmem_buffer contigmem_buffers[RTE_CONTIGMEM_MAX_NUM_BUFS];
static struct cdev *contigmem_cdev = NULL;
+static int contigmem_refcnt;
TUNABLE_INT("hw.contigmem.num_buffers", &contigmem_num_buffers);
TUNABLE_QUAD("hw.contigmem.buffer_size", &contigmem_buffer_size);
@@ -78,6 +91,8 @@ SYSCTL_INT(_hw_contigmem, OID_AUTO, num_buffers, CTLFLAG_RD,
&contigmem_num_buffers, 0, "Number of contigmem buffers allocated");
SYSCTL_QUAD(_hw_contigmem, OID_AUTO, buffer_size, CTLFLAG_RD,
&contigmem_buffer_size, 0, "Size of each contiguous buffer");
+SYSCTL_INT(_hw_contigmem, OID_AUTO, num_references, CTLFLAG_RD,
+ &contigmem_refcnt, 0, "Number of references to contigmem");
static SYSCTL_NODE(_hw_contigmem, OID_AUTO, physaddr, CTLFLAG_RD, 0,
"physaddr");
@@ -114,42 +129,49 @@ MODULE_VERSION(contigmem, 1);
static struct cdevsw contigmem_ops = {
.d_name = "contigmem",
.d_version = D_VERSION,
- .d_mmap = contigmem_mmap,
+ .d_flags = D_TRACKCLOSE,
.d_mmap_single = contigmem_mmap_single,
.d_open = contigmem_open,
+ .d_close = contigmem_close,
};
static int
contigmem_load()
{
char index_string[8], description[32];
- int i;
+ int i, error = 0;
+ void *addr;
if (contigmem_num_buffers > RTE_CONTIGMEM_MAX_NUM_BUFS) {
printf("%d buffers requested is greater than %d allowed\n",
contigmem_num_buffers, RTE_CONTIGMEM_MAX_NUM_BUFS);
- return EINVAL;
+ error = EINVAL;
+ goto error;
}
if (contigmem_buffer_size < PAGE_SIZE ||
(contigmem_buffer_size & (contigmem_buffer_size - 1)) != 0) {
printf("buffer size 0x%lx is not greater than PAGE_SIZE and "
"power of two\n", contigmem_buffer_size);
- return EINVAL;
+ error = EINVAL;
+ goto error;
}
for (i = 0; i < contigmem_num_buffers; i++) {
- contigmem_buffers[i] =
- contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO, 0,
- BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
-
- if (contigmem_buffers[i] == NULL) {
+ addr = contigmalloc(contigmem_buffer_size, M_CONTIGMEM, M_ZERO,
+ 0, BUS_SPACE_MAXADDR, contigmem_buffer_size, 0);
+ if (addr == NULL) {
printf("contigmalloc failed for buffer %d\n", i);
- return ENOMEM;
+ error = ENOMEM;
+ goto error;
}
- printf("%2u: virt=%p phys=%p\n", i, contigmem_buffers[i],
- (void *)pmap_kextract((vm_offset_t)contigmem_buffers[i]));
+ printf("%2u: virt=%p phys=%p\n", i, addr,
+ (void *)pmap_kextract((vm_offset_t)addr));
+
+ mtx_init(&contigmem_buffers[i].mtx, "contigmem", NULL, MTX_DEF);
+ contigmem_buffers[i].addr = addr;
+ contigmem_buffers[i].refcnt = 0;
snprintf(index_string, sizeof(index_string), "%d", i);
snprintf(description, sizeof(description),
@@ -165,6 +187,17 @@ contigmem_load()
GID_WHEEL, 0600, "contigmem");
return 0;
+
+error:
+ for (i = 0; i < contigmem_num_buffers; i++) {
+ if (contigmem_buffers[i].addr != NULL)
+ contigfree(contigmem_buffers[i].addr,
+ contigmem_buffer_size, M_CONTIGMEM);
+ if (mtx_initialized(&contigmem_buffers[i].mtx))
+ mtx_destroy(&contigmem_buffers[i].mtx);
+ }
+
+ return error;
}
static int
@@ -172,16 +205,22 @@ contigmem_unload()
{
int i;
+ if (contigmem_refcnt > 0)
+ return EBUSY;
+
if (contigmem_cdev != NULL)
destroy_dev(contigmem_cdev);
if (contigmem_eh_tag != NULL)
EVENTHANDLER_DEREGISTER(process_exit, contigmem_eh_tag);
- for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++)
- if (contigmem_buffers[i] != NULL)
- contigfree(contigmem_buffers[i], contigmem_buffer_size,
- M_CONTIGMEM);
+ for (i = 0; i < RTE_CONTIGMEM_MAX_NUM_BUFS; i++) {
+ if (contigmem_buffers[i].addr != NULL)
+ contigfree(contigmem_buffers[i].addr,
+ contigmem_buffer_size, M_CONTIGMEM);
+ if (mtx_initialized(&contigmem_buffers[i].mtx))
+ mtx_destroy(&contigmem_buffers[i].mtx);
+ }
return 0;
}
@@ -192,7 +231,7 @@ contigmem_physaddr(SYSCTL_HANDLER_ARGS)
uint64_t physaddr;
int index = (int)(uintptr_t)arg1;
- physaddr = (uint64_t)vtophys(contigmem_buffers[index]);
+ physaddr = (uint64_t)vtophys(contigmem_buffers[index].addr);
return sysctl_handle_64(oidp, &physaddr, 0, req);
}
@@ -200,22 +239,121 @@ static int
contigmem_open(struct cdev *cdev, int fflags, int devtype,
struct thread *td)
{
+
+ atomic_add_int(&contigmem_refcnt, 1);
+
return 0;
}
static int
-contigmem_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
- int prot, vm_memattr_t *memattr)
+contigmem_close(struct cdev *cdev, int fflags, int devtype,
+ struct thread *td)
{
- *paddr = offset;
+ atomic_subtract_int(&contigmem_refcnt, 1);
+
return 0;
}
static int
+contigmem_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+ struct contigmem_vm_handle *vmh = handle;
+ struct contigmem_buffer *buf;
+
+ buf = &contigmem_buffers[vmh->buffer_index];
+
+ atomic_add_int(&contigmem_refcnt, 1);
+
+ mtx_lock(&buf->mtx);
+ if (buf->refcnt == 0)
+ memset(buf->addr, 0, contigmem_buffer_size);
+ buf->refcnt++;
+ mtx_unlock(&buf->mtx);
+
+ return 0;
+}
+
+static void
+contigmem_cdev_pager_dtor(void *handle)
+{
+ struct contigmem_vm_handle *vmh = handle;
+ struct contigmem_buffer *buf;
+
+ buf = &contigmem_buffers[vmh->buffer_index];
+
+ mtx_lock(&buf->mtx);
+ buf->refcnt--;
+ mtx_unlock(&buf->mtx);
+
+ free(vmh, M_CONTIGMEM);
+
+ atomic_subtract_int(&contigmem_refcnt, 1);
+}
+
+static int
+contigmem_cdev_pager_fault(vm_object_t object, vm_ooffset_t offset, int prot,
+ vm_page_t *mres)
+{
+ vm_paddr_t paddr;
+ vm_page_t m_paddr, page;
+ vm_memattr_t memattr, memattr1;
+
+ memattr = object->memattr;
+
+ VM_OBJECT_WUNLOCK(object);
+
+ paddr = offset;
+
+ m_paddr = vm_phys_paddr_to_vm_page(paddr);
+ if (m_paddr != NULL) {
+ memattr1 = pmap_page_get_memattr(m_paddr);
+ if (memattr1 != memattr)
+ memattr = memattr1;
+ }
+
+ if (((*mres)->flags & PG_FICTITIOUS) != 0) {
+ /*
+ * If the passed in result page is a fake page, update it with
+ * the new physical address.
+ */
+ page = *mres;
+ VM_OBJECT_WLOCK(object);
+ vm_page_updatefake(page, paddr, memattr);
+ } else {
+ vm_page_t mret;
+ /*
+ * Replace the passed in reqpage page with our own fake page and
+ * free up the original page.
+ */
+ page = vm_page_getfake(paddr, memattr);
+ VM_OBJECT_WLOCK(object);
+ mret = vm_page_replace(page, object, (*mres)->pindex);
+ KASSERT(mret == *mres,
+ ("invalid page replacement, old=%p, ret=%p", *mres, mret));
+ vm_page_lock(mret);
+ vm_page_free(mret);
+ vm_page_unlock(mret);
+ *mres = page;
+ }
+
+ page->valid = VM_PAGE_BITS_ALL;
+
+ return VM_PAGER_OK;
+}
+
+static struct cdev_pager_ops contigmem_cdev_pager_ops = {
+ .cdev_pg_ctor = contigmem_cdev_pager_ctor,
+ .cdev_pg_dtor = contigmem_cdev_pager_dtor,
+ .cdev_pg_fault = contigmem_cdev_pager_fault,
+};
+
+static int
contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
struct vm_object **obj, int nprot)
{
+ struct contigmem_vm_handle *vmh;
uint64_t buffer_index;
/*
@@ -227,10 +365,17 @@ contigmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
if (buffer_index >= contigmem_num_buffers)
return EINVAL;
- memset(contigmem_buffers[buffer_index], 0, contigmem_buffer_size);
- *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index]);
- *obj = vm_pager_allocate(OBJT_DEVICE, cdev, size, nprot, *offset,
- curthread->td_ucred);
+ if (size > contigmem_buffer_size)
+ return EINVAL;
+
+ vmh = malloc(sizeof(*vmh), M_CONTIGMEM, M_NOWAIT | M_ZERO);
+ if (vmh == NULL)
+ return ENOMEM;
+ vmh->buffer_index = buffer_index;
+
+ *offset = (vm_ooffset_t)vtophys(contigmem_buffers[buffer_index].addr);
+ *obj = cdev_pager_allocate(vmh, OBJT_DEVICE, &contigmem_cdev_pager_ops,
+ size, nprot, *offset, curthread->td_ucred);
return 0;
}
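The contigmem rework above replaces the bare buffer array with reference-counted buffers so the module refuses to unload while mappings exist, and so a buffer is zeroed only when its first mapping is created, not on every mmap. The locking pattern, transplanted to a plain userspace sketch (pthread mutex standing in for the kernel mtx; names hypothetical):

	#include <pthread.h>
	#include <string.h>

	struct buf_ref {
		void *addr;
		size_t size;
		int refcnt;
		pthread_mutex_t mtx;
	};

	/* First mapper zeroes the buffer; later mappers just bump the count. */
	static void
	buf_acquire(struct buf_ref *b)
	{
		pthread_mutex_lock(&b->mtx);
		if (b->refcnt == 0)
			memset(b->addr, 0, b->size);
		b->refcnt++;
		pthread_mutex_unlock(&b->mtx);
	}

	static void
	buf_release(struct buf_ref *b)
	{
		pthread_mutex_lock(&b->mtx);
		b->refcnt--;
		pthread_mutex_unlock(&b->mtx);
	}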
diff --git a/lib/librte_eal/common/eal_common_proc.c b/lib/librte_eal/common/eal_common_proc.c
index 12e0fcac..60526cad 100644
--- a/lib/librte_eal/common/eal_common_proc.c
+++ b/lib/librte_eal/common/eal_common_proc.c
@@ -46,10 +46,10 @@ rte_eal_primary_proc_alive(const char *config_file_path)
if (config_file_path)
config_fd = open(config_file_path, O_RDONLY);
else {
- char default_path[PATH_MAX+1];
- snprintf(default_path, PATH_MAX, RUNTIME_CONFIG_FMT,
- default_config_dir, "rte");
- config_fd = open(default_path, O_RDONLY);
+ const char *path;
+
+ path = eal_runtime_config_path();
+ config_fd = open(path, O_RDONLY);
}
if (config_fd < 0)
return 0;
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
index 5bbe906b..9b9635d7 100644
--- a/lib/librte_eal/common/include/rte_version.h
+++ b/lib/librte_eal/common/include/rte_version.h
@@ -66,7 +66,7 @@ extern "C" {
/**
* Patch level number i.e. the z in yy.mm.z
*/
-#define RTE_VER_MINOR 2
+#define RTE_VER_MINOR 3
/**
* Extra string to be appended to version number
diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c
index 42568e1d..08516af2 100644
--- a/lib/librte_eal/common/malloc_elem.c
+++ b/lib/librte_eal/common/malloc_elem.c
@@ -314,17 +314,16 @@ malloc_elem_free(struct malloc_elem *elem)
int
malloc_elem_resize(struct malloc_elem *elem, size_t size)
{
- const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
+ const size_t new_size = size + elem->pad + MALLOC_ELEM_OVERHEAD;
/* if we request a smaller size, then always return ok */
- const size_t current_size = elem->size - elem->pad;
- if (current_size >= new_size)
+ if (elem->size >= new_size)
return 0;
struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
rte_spinlock_lock(&elem->heap->lock);
if (next ->state != ELEM_FREE)
goto err_return;
- if (current_size + next->size < new_size)
+ if (elem->size + next->size < new_size)
goto err_return;
/* we now know the element fits, so remove from free list,
@@ -333,7 +332,7 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
elem_free_list_remove(next);
join_elem(elem, next);
- if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){
+ if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
/* now we have a big block together. Lets cut it down a bit, by splitting */
struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
split_pt = RTE_PTR_ALIGN_CEIL(split_pt, RTE_CACHE_LINE_SIZE);
diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h
index d077b49e..8667f29c 100644
--- a/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h
+++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/igb.h
@@ -607,7 +607,7 @@ struct igb_adapter {
int int_mode;
u32 rss_queues;
u32 vmdq_pools;
- char fw_version[32];
+ char fw_version[43];
u32 wvbr;
struct igb_mac_addr *mac_table;
#ifdef CONFIG_IGB_VMDQ_NETDEV
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 8c15c4c9..978ac601 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1034,7 +1034,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
*/
struct rte_lpm_tbl_entry new_tbl24_entry = {
- .group_idx = (uint8_t)tbl8_group_index,
+ .group_idx = tbl8_group_index,
.valid = VALID,
.valid_group = 1,
.depth = 0,
@@ -1080,7 +1080,7 @@ add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
*/
struct rte_lpm_tbl_entry new_tbl24_entry = {
- .group_idx = (uint8_t)tbl8_group_index,
+ .group_idx = tbl8_group_index,
.valid = VALID,
.valid_group = 1,
.depth = 0,
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index ead7c6ea..8a814df2 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -1060,6 +1060,7 @@ static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
* Array size
* @return
* - 0: Success
+ * - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
*/
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
struct rte_mbuf **mbufs, unsigned count)
@@ -1331,7 +1332,7 @@ static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
*/
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return m->data_off;
}
@@ -1345,7 +1346,7 @@ static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
*/
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
- __rte_mbuf_sanity_check(m, 1);
+ __rte_mbuf_sanity_check(m, 0);
return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
m->data_len);
}
diff --git a/lib/librte_mbuf/rte_mbuf_ptype.h b/lib/librte_mbuf/rte_mbuf_ptype.h
index a3269c4c..acd70bb6 100644
--- a/lib/librte_mbuf/rte_mbuf_ptype.h
+++ b/lib/librte_mbuf/rte_mbuf_ptype.h
@@ -341,11 +341,11 @@ extern "C" {
* Packet format:
* <'ether type'=0x0800
* | 'version'=4, 'protocol'=17
- * | 'destination port'=4798>
+ * | 'destination port'=4789>
* or,
* <'ether type'=0x86DD
* | 'version'=6, 'next header'=17
- * | 'destination port'=4798>
+ * | 'destination port'=4789>
*/
#define RTE_PTYPE_TUNNEL_VXLAN 0x00003000
/**
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 0cb1c677..d25e1c02 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -110,6 +110,10 @@ vhost_backend_cleanup(struct virtio_net *dev)
rte_free(dev->mem);
dev->mem = NULL;
}
+
+ free(dev->guest_pages);
+ dev->guest_pages = NULL;
+
if (dev->log_addr) {
munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
dev->log_addr = 0;
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ea027f14..0027f393 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -50,10 +50,19 @@
#define MAX_PKT_BURST 32
#define VHOST_LOG_PAGE 4096
+/*
+ * Atomically set a bit in memory.
+ */
+static inline void __attribute__((always_inline))
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+ __sync_fetch_and_or_8(addr, (1U << nr));
+}
+
static inline void __attribute__((always_inline))
vhost_log_page(uint8_t *log_base, uint64_t page)
{
- log_base[page / 8] |= 1 << (page % 8);
+ vhost_set_bit(page % 8, &log_base[page / 8]);
}
static inline void __attribute__((always_inline))
@@ -144,11 +153,16 @@ update_shadow_used_ring(struct vhost_virtqueue *vq,
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- if (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+
+ if (m_buf->ol_flags & PKT_TX_TCP_SEG)
+ csum_l4 |= PKT_TX_TCP_CKSUM;
+
+ if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
- switch (m_buf->ol_flags & PKT_TX_L4_MASK) {
+ switch (csum_l4) {
case PKT_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct tcp_hdr,
cksum));
@@ -164,6 +178,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
}
}
+ /* IP cksum verification cannot be bypassed, then calculate here */
+ if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ struct ipv4_hdr *ipv4_hdr;
+
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
+ m_buf->l2_len);
+ ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+ }
+
if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
if (m_buf->ol_flags & PKT_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
@@ -630,9 +653,11 @@ static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
if (dev->features &
- (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN |
- VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
- VIRTIO_NET_F_HOST_UFO))
+ ((1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO)))
return true;
return false;
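The last hunk fixes a mask/position confusion: the VIRTIO_NET_F_* constants are bit numbers, so testing them directly against the 64-bit feature word checks the wrong bits; each must be shifted into a mask first. Minimal illustration (bit positions as in the virtio spec):

	#include <stdbool.h>
	#include <stdint.h>

	#define F_CSUM       0
	#define F_HOST_TSO4 11

	static bool
	has_feature(uint64_t features, unsigned int bit)
	{
		return (features & (1ULL << bit)) != 0;
	}

	/* has_feature(f, F_CSUM) tests bit 0; the old (f & F_CSUM)
	 * tested no bits at all, since F_CSUM is 0. */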
diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec
index 017d959f..aab74cdd 100644
--- a/pkg/dpdk.spec
+++ b/pkg/dpdk.spec
@@ -30,7 +30,7 @@
# OF THE POSSIBILITY OF SUCH DAMAGE.
Name: dpdk
-Version: 16.11.2
+Version: 16.11.3
Release: 1
Packager: packaging@6wind.com
URL: http://dpdk.org