author     Ricardo Salveti <ricardo.salveti@linaro.org>    2016-07-18 15:30:06 -0300
committer  Ricardo Salveti <ricardo.salveti@linaro.org>    2016-07-18 15:30:30 -0300
commit     a41e6ff15809d40e0f9bbc9576bf8f7f80fbec1d
tree       c9e6fc399c2738e84ed2585e6e51e90f9608ca12
parent     8b25d1ad5d2264bdfc2818c7bda74ee2697df6db
Imported Upstream version 16.07-rc2
Change-Id: Ie9e8ec528a2a0dace085c5e44aa7fa3b489d4ba0
Signed-off-by: Ricardo Salveti <ricardo.salveti@linaro.org>
149 files changed, 4501 insertions, 2207 deletions
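
A recurring pattern in the diff below is wrapping the cryptodev PMD name macros in RTE_STR(): the names become bare preprocessor tokens (so the new two-argument PMD_REGISTER_DRIVER(drv, name) and the pmdinfogen tool can consume them), and any call site that needs a C string must now stringify them. The following is a minimal sketch of the two-level stringification this relies on; the macro names in it are illustrative stand-ins, not DPDK's actual definitions.

    #include <stdio.h>

    /* Two-level stringification: the outer macro forces its argument to be
     * macro-expanded before the inner '#' operator turns it into a string
     * literal. DPDK's RTE_STR() in rte_common.h follows the same idea. */
    #define STRINGIFY_1(x) #x
    #define STRINGIFY(x)   STRINGIFY_1(x)

    /* Illustrative stand-in for a PMD name macro that is now a bare token. */
    #define EXAMPLE_PMD_NAME crypto_example_pmd

    int main(void)
    {
        /* Prints "crypto_example_pmd": the token is expanded, then quoted. */
        printf("%s\n", STRINGIFY(EXAMPLE_PMD_NAME));

        /* A single level of '#' would instead yield the unexpanded
         * macro name "EXAMPLE_PMD_NAME". */
        printf("%s\n", STRINGIFY_1(EXAMPLE_PMD_NAME));
        return 0;
    }
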
diff --git a/GNUmakefile b/GNUmakefile index b59e4b6e..00fe0db7 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -40,6 +40,6 @@ export RTE_SDK # directory list # -ROOTDIRS-y := lib drivers app +ROOTDIRS-y := buildtools lib drivers app include $(RTE_SDK)/mk/rte.sdkroot.mk diff --git a/MAINTAINERS b/MAINTAINERS index a59191ea..f996c2ed 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -68,6 +68,10 @@ F: lib/librte_compat/ F: doc/guides/rel_notes/deprecation.rst F: scripts/validate-abi.sh +Driver information +F: buildtools/pmdinfogen/ +F: tools/pmdinfo.py + Environment Abstraction Layer ----------------------------- diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index b6b61ad3..f90befc8 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -94,10 +94,6 @@ static struct cmdline *testpmd_cl; static void cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue); -#ifdef RTE_NIC_BYPASS -uint8_t bypass_is_supported(portid_t port_id); -#endif - /* *** Help command with introduction. *** */ struct cmd_help_brief_result { cmdline_fixed_string_t help; @@ -3656,9 +3652,6 @@ cmd_set_bypass_mode_parsed(void *parsed_result, portid_t port_id = res->port_id; uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL; - if (!bypass_is_supported(port_id)) - return; - if (!strcmp(res->value, "bypass")) bypass_mode = RTE_BYPASS_MODE_BYPASS; else if (!strcmp(res->value, "isolate")) @@ -3725,9 +3718,6 @@ cmd_set_bypass_event_parsed(void *parsed_result, uint32_t bypass_event = RTE_BYPASS_EVENT_NONE; uint32_t bypass_mode = RTE_BYPASS_MODE_NORMAL; - if (!bypass_is_supported(port_id)) - return; - if (!strcmp(res->event_value, "timeout")) bypass_event = RTE_BYPASS_EVENT_TIMEOUT; else if (!strcmp(res->event_value, "os_on")) @@ -3903,9 +3893,6 @@ cmd_show_bypass_config_parsed(void *parsed_result, "timeout"}; int num_events = (sizeof events) / (sizeof events[0]); - if (!bypass_is_supported(port_id)) - return; - /* Display the bypass mode.*/ if (0 != rte_eth_dev_bypass_state_show(port_id, &bypass_mode)) { printf("\tFailed to get bypass mode for port = %d\n", port_id); @@ -10800,29 +10787,3 @@ cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue) ports[id].need_reconfig_queues = queue; } } - -#ifdef RTE_NIC_BYPASS -#include <rte_pci_dev_ids.h> -uint8_t -bypass_is_supported(portid_t port_id) -{ - struct rte_port *port; - struct rte_pci_id *pci_id; - - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return 0; - - /* Get the device id. */ - port = &ports[port_id]; - pci_id = &port->dev_info.pci_dev->id; - - /* Check if NIC supports bypass. */ - if (pci_id->device_id == IXGBE_DEV_ID_82599_BYPASS) { - return 1; - } - else { - printf("\tBypass not supported for port_id = %d.\n", port_id); - return 0; - } -} -#endif diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index c5865f95..bfcbff9c 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -1201,14 +1201,9 @@ simple_fwd_config_setup(void) } /** - * For the RSS forwarding test, each core is assigned on every port a transmit - * queue whose index is the index of the core itself. This approach limits the - * maximumm number of processing cores of the RSS test to the maximum number of - * TX queues supported by the devices. - * - * Each core is assigned a single stream, each stream being composed of - * a RX queue to poll on a RX port for input messages, associated with - * a TX queue of a TX port where to send forwarded packets. + * For the RSS forwarding test all streams distributed over lcores. 
Each stream + * being composed of a RX queue to poll on a RX port for input messages, + * associated with a TX queue of a TX port where to send forwarded packets. * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two * following rules: @@ -1222,7 +1217,7 @@ rss_fwd_config_setup(void) portid_t txp; queueid_t rxq; queueid_t nb_q; - lcoreid_t lc_id; + streamid_t sm_id; nb_q = nb_rxq; if (nb_q > nb_txq) @@ -1241,10 +1236,10 @@ rss_fwd_config_setup(void) setup_fwd_config_of_each_lcore(&cur_fwd_config); rxp = 0; rxq = 0; - for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_streams; lc_id++) { + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { struct fwd_stream *fs; - fs = fwd_streams[lc_id]; + fs = fwd_streams[sm_id]; if ((rxp & 0x1) == 0) txp = (portid_t) (rxp + 1); diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 06885ceb..b7f28e96 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -272,6 +272,9 @@ uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF; #endif +/* default period is 1 second */ +static uint64_t timer_period = 1; + /* * Ethernet device configuration. */ @@ -877,17 +880,35 @@ flush_fwd_rx_queues(void) uint16_t nb_rx; uint16_t i; uint8_t j; + uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; + + /* convert to number of cycles */ + timer_period *= rte_get_timer_hz(); for (j = 0; j < 2; j++) { for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { for (rxq = 0; rxq < nb_rxq; rxq++) { port_id = fwd_ports_ids[rxp]; + /** + * testpmd can stuck in the below do while loop + * if rte_eth_rx_burst() always returns nonzero + * packets. So timer is added to exit this loop + * after 1sec timer expiry. + */ + prev_tsc = rte_rdtsc(); do { nb_rx = rte_eth_rx_burst(port_id, rxq, pkts_burst, MAX_PKT_BURST); for (i = 0; i < nb_rx; i++) rte_pktmbuf_free(pkts_burst[i]); - } while (nb_rx > 0); + + cur_tsc = rte_rdtsc(); + diff_tsc = cur_tsc - prev_tsc; + timer_tsc += diff_tsc; + } while ((nb_rx > 0) && + (timer_tsc < timer_period)); + prev_tsc = cur_tsc; + timer_tsc = 0; } } rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index fbfe1d0d..33325a8b 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -186,12 +186,12 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { ret = rte_eal_vdev_init( - CRYPTODEV_NAME_AESNI_MB_PMD, NULL); + RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL); TEST_ASSERT(ret == 0, "Failed to create instance %u of" " pmd : %s", - i, CRYPTODEV_NAME_AESNI_MB_PMD); + i, RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)); } } } @@ -203,10 +203,10 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { TEST_ASSERT_SUCCESS(rte_eal_vdev_init( - CRYPTODEV_NAME_AESNI_GCM_PMD, NULL), + RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), NULL), "Failed to create instance %u of" " pmd : %s", - i, CRYPTODEV_NAME_AESNI_GCM_PMD); + i, RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD)); } } } @@ -217,10 +217,10 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { TEST_ASSERT_SUCCESS(rte_eal_vdev_init( - CRYPTODEV_NAME_SNOW3G_PMD, NULL), + RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL), "Failed to create instance %u of" " pmd : %s", - i, CRYPTODEV_NAME_SNOW3G_PMD); + i, RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD)); } } } @@ -231,10 +231,10 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { 
TEST_ASSERT_SUCCESS(rte_eal_vdev_init( - CRYPTODEV_NAME_KASUMI_PMD, NULL), + RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), NULL), "Failed to create instance %u of" " pmd : %s", - i, CRYPTODEV_NAME_KASUMI_PMD); + i, RTE_STR(CRYPTODEV_NAME_KASUMI_PMD)); } } } @@ -246,12 +246,12 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { int dev_id = rte_eal_vdev_init( - CRYPTODEV_NAME_NULL_PMD, NULL); + RTE_STR(CRYPTODEV_NAME_NULL_PMD), NULL); TEST_ASSERT(dev_id >= 0, "Failed to create instance %u of" " pmd : %s", - i, CRYPTODEV_NAME_NULL_PMD); + i, RTE_STR(CRYPTODEV_NAME_NULL_PMD)); } } } @@ -2295,7 +2295,7 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata) plaintext_pad_len); memcpy(plaintext, tdata->plaintext.data, plaintext_len); - TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len); + TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len); /* Create SNOW3G operation */ retval = create_snow3g_kasumi_cipher_operation(tdata->iv.data, tdata->iv.len, @@ -2316,7 +2316,7 @@ test_snow3g_encryption(const struct snow3g_test_data *tdata) else ciphertext = plaintext; - TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len); + TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len); /* Validate obuf */ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT( @@ -2368,7 +2368,7 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata) rte_pktmbuf_append(ut_params->obuf, plaintext_pad_len); memcpy(plaintext, tdata->plaintext.data, plaintext_len); - TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len); + TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len); /* Create SNOW3G operation */ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data, @@ -2390,7 +2390,7 @@ test_snow3g_encryption_oop(const struct snow3g_test_data *tdata) else ciphertext = plaintext; - TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len); + TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len); /* Validate obuf */ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT( @@ -2549,7 +2549,7 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata) ciphertext_pad_len); memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len); - TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len); + TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len); /* Create SNOW3G operation */ retval = create_snow3g_kasumi_cipher_operation(tdata->iv.data, tdata->iv.len, @@ -2569,7 +2569,7 @@ static int test_snow3g_decryption(const struct snow3g_test_data *tdata) else plaintext = ciphertext; - TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len); + TEST_HEXDUMP(stdout, "plaintext:", plaintext, ciphertext_len); /* Validate obuf */ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(plaintext, @@ -2622,7 +2622,7 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata) rte_pktmbuf_append(ut_params->obuf, ciphertext_pad_len); memcpy(ciphertext, tdata->ciphertext.data, ciphertext_len); - TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len); + TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, ciphertext_len); /* Create SNOW3G operation */ retval = create_snow3g_kasumi_cipher_operation_oop(tdata->iv.data, @@ -2643,7 +2643,7 @@ static int test_snow3g_decryption_oop(const struct snow3g_test_data *tdata) else plaintext = ciphertext; - TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len); + TEST_HEXDUMP(stdout, "plaintext:", plaintext, ciphertext_len); /* Validate obuf */ 
TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT(plaintext, @@ -2689,7 +2689,7 @@ test_snow3g_authenticated_encryption(const struct snow3g_test_data *tdata) plaintext_pad_len); memcpy(plaintext, tdata->plaintext.data, plaintext_len); - TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len); + TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len); /* Create SNOW3G operation */ retval = create_snow3g_kasumi_cipher_hash_operation(tdata->digest.data, @@ -2717,7 +2717,7 @@ test_snow3g_authenticated_encryption(const struct snow3g_test_data *tdata) else ciphertext = plaintext; - TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len); + TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len); /* Validate obuf */ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT( @@ -2774,7 +2774,7 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata) plaintext_pad_len); memcpy(plaintext, tdata->plaintext.data, plaintext_len); - TEST_HEXDUMP(stdout, "plaintext:", plaintext, tdata->plaintext.len); + TEST_HEXDUMP(stdout, "plaintext:", plaintext, plaintext_len); /* Create SNOW3G operation */ retval = create_snow3g_kasumi_auth_cipher_operation( @@ -2805,7 +2805,7 @@ test_snow3g_encrypted_authentication(const struct snow3g_test_data *tdata) ut_params->digest = rte_pktmbuf_mtod(ut_params->obuf, uint8_t *) + plaintext_pad_len + tdata->aad.len + tdata->iv.len; - TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, tdata->ciphertext.len); + TEST_HEXDUMP(stdout, "ciphertext:", ciphertext, plaintext_len); /* Validate obuf */ TEST_ASSERT_BUFFERS_ARE_EQUAL_BIT( @@ -3002,6 +3002,7 @@ create_gcm_session(uint8_t dev_id, enum rte_crypto_cipher_operation op, ut_params->cipher_xform.next = NULL; ut_params->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_GCM; + ut_params->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; ut_params->cipher_xform.cipher.op = op; ut_params->cipher_xform.cipher.key.data = cipher_key; ut_params->cipher_xform.cipher.key.length = key_len; diff --git a/app/test/test_cryptodev_perf.c b/app/test/test_cryptodev_perf.c index d7282112..815c41ff 100644 --- a/app/test/test_cryptodev_perf.c +++ b/app/test/test_cryptodev_perf.c @@ -120,15 +120,15 @@ static const char *chain_mode_name(enum chain_mode mode) static const char *pmd_name(enum rte_cryptodev_type pmd) { switch (pmd) { - case RTE_CRYPTODEV_NULL_PMD: return CRYPTODEV_NAME_NULL_PMD; break; + case RTE_CRYPTODEV_NULL_PMD: return RTE_STR(CRYPTODEV_NAME_NULL_PMD); break; case RTE_CRYPTODEV_AESNI_GCM_PMD: - return CRYPTODEV_NAME_AESNI_GCM_PMD; + return RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD); case RTE_CRYPTODEV_AESNI_MB_PMD: - return CRYPTODEV_NAME_AESNI_MB_PMD; + return RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD); case RTE_CRYPTODEV_QAT_SYM_PMD: - return CRYPTODEV_NAME_QAT_SYM_PMD; + return RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD); case RTE_CRYPTODEV_SNOW3G_PMD: - return CRYPTODEV_NAME_SNOW3G_PMD; + return RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD); default: return ""; } @@ -249,11 +249,11 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { ret = rte_eal_vdev_init( - CRYPTODEV_NAME_AESNI_MB_PMD, NULL); + RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), NULL); TEST_ASSERT(ret == 0, "Failed to create instance %u of pmd : %s", - i, CRYPTODEV_NAME_AESNI_MB_PMD); + i, RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD)); } } } @@ -264,11 +264,11 @@ testsuite_setup(void) if (nb_devs < 2) { for (i = nb_devs; i < 2; i++) { ret = rte_eal_vdev_init( - CRYPTODEV_NAME_SNOW3G_PMD, NULL); + RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), NULL); TEST_ASSERT(ret == 0, "Failed to 
create instance %u of pmd : %s", - i, CRYPTODEV_NAME_SNOW3G_PMD); + i, RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD)); } } } diff --git a/app/test/test_hash.c b/app/test/test_hash.c index 7e41725a..29abcd9e 100644 --- a/app/test/test_hash.c +++ b/app/test/test_hash.c @@ -421,6 +421,46 @@ static int test_add_update_delete(void) } /* + * Sequence of operations for retrieving a key with its position + * + * - create table + * - add key + * - get the key with its position: hit + * - delete key + * - try to get the deleted key: miss + * + */ +static int test_hash_get_key_with_position(void) +{ + struct rte_hash *handle = NULL; + int pos, expectedPos, result; + void *key; + + ut_params.name = "hash_get_key_w_pos"; + handle = rte_hash_create(&ut_params); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + pos = rte_hash_add_key(handle, &keys[0]); + print_key_info("Add", &keys[0], pos); + RETURN_IF_ERROR(pos < 0, "failed to add key (pos0=%d)", pos); + expectedPos = pos; + + result = rte_hash_get_key_with_position(handle, pos, &key); + RETURN_IF_ERROR(result != 0, "error retrieving a key"); + + pos = rte_hash_del_key(handle, &keys[0]); + print_key_info("Del", &keys[0], pos); + RETURN_IF_ERROR(pos != expectedPos, + "failed to delete key (pos0=%d)", pos); + + result = rte_hash_get_key_with_position(handle, pos, &key); + RETURN_IF_ERROR(result != -ENOENT, "non valid key retrieved"); + + rte_hash_free(handle); + return 0; +} + +/* * Sequence of operations for find existing hash table * * - create table @@ -1442,6 +1482,8 @@ test_hash(void) return -1; if (test_hash_add_delete_jhash_3word() < 0) return -1; + if (test_hash_get_key_with_position() < 0) + return -1; if (test_hash_find_existing() < 0) return -1; if (test_add_update_delete() < 0) diff --git a/app/test/test_red.c b/app/test/test_red.c index 2384c556..7d1c32c4 100644 --- a/app/test/test_red.c +++ b/app/test/test_red.c @@ -274,46 +274,6 @@ static int check_avg(double *diff, double avg, double exp_avg, double tolerance) } /** - * get the clk frequency in Hz - */ -static uint64_t get_machclk_freq(void) -{ - uint64_t start = 0; - uint64_t end = 0; - uint64_t diff = 0; - static uint64_t clk_freq_hz; - struct timespec tv_start = {0, 0}, tv_end = {0, 0}; - struct timespec req = {0, 0}; - - if (clk_freq_hz != 0) - return clk_freq_hz; - - req.tv_sec = 0; - req.tv_nsec = NSEC_PER_SEC / 4; - - clock_gettime(CLOCK_REALTIME, &tv_start); - start = rte_rdtsc(); - - if (nanosleep(&req, NULL) != 0) { - perror("get_machclk_freq()"); - exit(EXIT_FAILURE); - } - - clock_gettime(CLOCK_REALTIME, &tv_end); - end = rte_rdtsc(); - - diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * USEC_PER_SEC - + ((tv_end.tv_nsec - tv_start.tv_nsec + TEST_NSEC_MARGIN) / - USEC_PER_MSEC); /**< diff is in micro secs */ - - if (diff == 0) - return 0; - - clk_freq_hz = ((end - start) * USEC_PER_SEC / diff); - return clk_freq_hz; -} - -/** * initialize the test rte_red config */ static enum test_result @@ -321,7 +281,7 @@ test_rte_red_init(struct test_config *tcfg) { unsigned i = 0; - tcfg->tvar->clk_freq = get_machclk_freq(); + tcfg->tvar->clk_freq = rte_get_timer_hz(); init_port_ts( tcfg->tvar->clk_freq ); for (i = 0; i < tcfg->tconfig->num_cfg; i++) { diff --git a/buildtools/Makefile b/buildtools/Makefile new file mode 100644 index 00000000..35a42ff5 --- /dev/null +++ b/buildtools/Makefile @@ -0,0 +1,36 @@ +# BSD LICENSE +# +# Copyright(c) 2016 Neil Horman. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-y += pmdinfogen + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/buildtools/pmdinfogen/Makefile b/buildtools/pmdinfogen/Makefile new file mode 100644 index 00000000..3885d3b3 --- /dev/null +++ b/buildtools/pmdinfogen/Makefile @@ -0,0 +1,49 @@ +# BSD LICENSE +# +# Copyright(c) 2016 Neil Horman. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +HOSTAPP = pmdinfogen + +# +# all sources are stored in SRCS-y +# +SRCS-y += pmdinfogen.c + +HOST_CFLAGS += $(WERROR_FLAGS) -g +HOST_CFLAGS += -I$(RTE_OUTPUT)/include + +DEPDIRS-y += lib/librte_eal + +include $(RTE_SDK)/mk/rte.hostapp.mk diff --git a/buildtools/pmdinfogen/pmdinfogen.c b/buildtools/pmdinfogen/pmdinfogen.c new file mode 100644 index 00000000..e1bf2e46 --- /dev/null +++ b/buildtools/pmdinfogen/pmdinfogen.c @@ -0,0 +1,420 @@ +/* Postprocess pmd object files to export hw support + * + * Copyright 2016 Neil Horman <nhorman@tuxdriver.com> + * Based in part on modpost.c from the linux kernel + * + * This software may be used and distributed according to the terms + * of the GNU General Public License V2, incorporated herein by reference. + * + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <ctype.h> +#include <string.h> +#include <limits.h> +#include <stdbool.h> +#include <errno.h> +#include <libgen.h> + +#include <rte_common.h> +#include "pmdinfogen.h" + +#ifdef RTE_ARCH_64 +#define ADDR_SIZE 64 +#else +#define ADDR_SIZE 32 +#endif + + +static const char *sym_name(struct elf_info *elf, Elf_Sym *sym) +{ + if (sym) + return elf->strtab + sym->st_name; + else + return "(unknown)"; +} + +static void *grab_file(const char *filename, unsigned long *size) +{ + struct stat st; + void *map = MAP_FAILED; + int fd; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return NULL; + if (fstat(fd, &st)) + goto failed; + + *size = st.st_size; + map = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); + +failed: + close(fd); + if (map == MAP_FAILED) + return NULL; + return map; +} + +/** + * Return a copy of the next line in a mmap'ed file. + * spaces in the beginning of the line is trimmed away. + * Return a pointer to a static buffer. + **/ +static void release_file(void *file, unsigned long size) +{ + munmap(file, size); +} + + +static void *get_sym_value(struct elf_info *info, const Elf_Sym *sym) +{ + return RTE_PTR_ADD(info->hdr, + info->sechdrs[sym->st_shndx].sh_offset + sym->st_value); +} + +static Elf_Sym *find_sym_in_symtab(struct elf_info *info, + const char *name, Elf_Sym *last) +{ + Elf_Sym *idx; + if (last) + idx = last+1; + else + idx = info->symtab_start; + + for (; idx < info->symtab_stop; idx++) { + const char *n = sym_name(info, idx); + if (!strncmp(n, name, strlen(name))) + return idx; + } + return NULL; +} + +static int parse_elf(struct elf_info *info, const char *filename) +{ + unsigned int i; + Elf_Ehdr *hdr; + Elf_Shdr *sechdrs; + Elf_Sym *sym; + int endian; + unsigned int symtab_idx = ~0U, symtab_shndx_idx = ~0U; + + hdr = grab_file(filename, &info->size); + if (!hdr) { + perror(filename); + exit(1); + } + info->hdr = hdr; + if (info->size < sizeof(*hdr)) { + /* file too small, assume this is an empty .o file */ + return 0; + } + /* Is this a valid ELF file? 
*/ + if ((hdr->e_ident[EI_MAG0] != ELFMAG0) || + (hdr->e_ident[EI_MAG1] != ELFMAG1) || + (hdr->e_ident[EI_MAG2] != ELFMAG2) || + (hdr->e_ident[EI_MAG3] != ELFMAG3)) { + /* Not an ELF file - silently ignore it */ + return 0; + } + + if (!hdr->e_ident[EI_DATA]) { + /* Unknown endian */ + return 0; + } + + endian = hdr->e_ident[EI_DATA]; + + /* Fix endianness in ELF header */ + hdr->e_type = TO_NATIVE(endian, 16, hdr->e_type); + hdr->e_machine = TO_NATIVE(endian, 16, hdr->e_machine); + hdr->e_version = TO_NATIVE(endian, 32, hdr->e_version); + hdr->e_entry = TO_NATIVE(endian, ADDR_SIZE, hdr->e_entry); + hdr->e_phoff = TO_NATIVE(endian, ADDR_SIZE, hdr->e_phoff); + hdr->e_shoff = TO_NATIVE(endian, ADDR_SIZE, hdr->e_shoff); + hdr->e_flags = TO_NATIVE(endian, 32, hdr->e_flags); + hdr->e_ehsize = TO_NATIVE(endian, 16, hdr->e_ehsize); + hdr->e_phentsize = TO_NATIVE(endian, 16, hdr->e_phentsize); + hdr->e_phnum = TO_NATIVE(endian, 16, hdr->e_phnum); + hdr->e_shentsize = TO_NATIVE(endian, 16, hdr->e_shentsize); + hdr->e_shnum = TO_NATIVE(endian, 16, hdr->e_shnum); + hdr->e_shstrndx = TO_NATIVE(endian, 16, hdr->e_shstrndx); + + sechdrs = RTE_PTR_ADD(hdr, hdr->e_shoff); + info->sechdrs = sechdrs; + + /* Check if file offset is correct */ + if (hdr->e_shoff > info->size) { + fprintf(stderr, "section header offset=%lu in file '%s' " + "is bigger than filesize=%lu\n", + (unsigned long)hdr->e_shoff, + filename, info->size); + return 0; + } + + if (hdr->e_shnum == SHN_UNDEF) { + /* + * There are more than 64k sections, + * read count from .sh_size. + */ + info->num_sections = TO_NATIVE(endian, 32, sechdrs[0].sh_size); + } else { + info->num_sections = hdr->e_shnum; + } + if (hdr->e_shstrndx == SHN_XINDEX) + info->secindex_strings = + TO_NATIVE(endian, 32, sechdrs[0].sh_link); + else + info->secindex_strings = hdr->e_shstrndx; + + /* Fix endianness in section headers */ + for (i = 0; i < info->num_sections; i++) { + sechdrs[i].sh_name = + TO_NATIVE(endian, 32, sechdrs[i].sh_name); + sechdrs[i].sh_type = + TO_NATIVE(endian, 32, sechdrs[i].sh_type); + sechdrs[i].sh_flags = + TO_NATIVE(endian, 32, sechdrs[i].sh_flags); + sechdrs[i].sh_addr = + TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_addr); + sechdrs[i].sh_offset = + TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_offset); + sechdrs[i].sh_size = + TO_NATIVE(endian, 32, sechdrs[i].sh_size); + sechdrs[i].sh_link = + TO_NATIVE(endian, 32, sechdrs[i].sh_link); + sechdrs[i].sh_info = + TO_NATIVE(endian, 32, sechdrs[i].sh_info); + sechdrs[i].sh_addralign = + TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_addralign); + sechdrs[i].sh_entsize = + TO_NATIVE(endian, ADDR_SIZE, sechdrs[i].sh_entsize); + } + /* Find symbol table. */ + for (i = 1; i < info->num_sections; i++) { + int nobits = sechdrs[i].sh_type == SHT_NOBITS; + + if (!nobits && sechdrs[i].sh_offset > info->size) { + fprintf(stderr, "%s is truncated. " + "sechdrs[i].sh_offset=%lu > sizeof(*hrd)=%zu\n", + filename, (unsigned long)sechdrs[i].sh_offset, + sizeof(*hdr)); + return 0; + } + + if (sechdrs[i].sh_type == SHT_SYMTAB) { + unsigned int sh_link_idx; + symtab_idx = i; + info->symtab_start = RTE_PTR_ADD(hdr, + sechdrs[i].sh_offset); + info->symtab_stop = RTE_PTR_ADD(hdr, + sechdrs[i].sh_offset + sechdrs[i].sh_size); + sh_link_idx = sechdrs[i].sh_link; + info->strtab = RTE_PTR_ADD(hdr, + sechdrs[sh_link_idx].sh_offset); + } + + /* 32bit section no. table? 
("more than 64k sections") */ + if (sechdrs[i].sh_type == SHT_SYMTAB_SHNDX) { + symtab_shndx_idx = i; + info->symtab_shndx_start = RTE_PTR_ADD(hdr, + sechdrs[i].sh_offset); + info->symtab_shndx_stop = RTE_PTR_ADD(hdr, + sechdrs[i].sh_offset + sechdrs[i].sh_size); + } + } + if (!info->symtab_start) + fprintf(stderr, "%s has no symtab?\n", filename); + + /* Fix endianness in symbols */ + for (sym = info->symtab_start; sym < info->symtab_stop; sym++) { + sym->st_shndx = TO_NATIVE(endian, 16, sym->st_shndx); + sym->st_name = TO_NATIVE(endian, 32, sym->st_name); + sym->st_value = TO_NATIVE(endian, ADDR_SIZE, sym->st_value); + sym->st_size = TO_NATIVE(endian, ADDR_SIZE, sym->st_size); + } + + if (symtab_shndx_idx != ~0U) { + Elf32_Word *p; + if (symtab_idx != sechdrs[symtab_shndx_idx].sh_link) + fprintf(stderr, + "%s: SYMTAB_SHNDX has bad sh_link: %u!=%u\n", + filename, sechdrs[symtab_shndx_idx].sh_link, + symtab_idx); + /* Fix endianness */ + for (p = info->symtab_shndx_start; p < info->symtab_shndx_stop; + p++) + *p = TO_NATIVE(endian, 32, *p); + } + + return 1; +} + +static void parse_elf_finish(struct elf_info *info) +{ + struct pmd_driver *tmp, *idx = info->drivers; + release_file(info->hdr, info->size); + while (idx) { + tmp = idx->next; + free(idx); + idx = tmp; + } +} + +struct opt_tag { + const char *suffix; + const char *json_id; +}; + +static const struct opt_tag opt_tags[] = { + {"_param_string_export", "params"}, +}; + +static int complete_pmd_entry(struct elf_info *info, struct pmd_driver *drv) +{ + const char *tname; + int i; + char tmpsymname[128]; + Elf_Sym *tmpsym; + + drv->name = get_sym_value(info, drv->name_sym); + + for (i = 0; i < PMD_OPT_MAX; i++) { + memset(tmpsymname, 0, 128); + sprintf(tmpsymname, "__%s%s", drv->name, opt_tags[i].suffix); + tmpsym = find_sym_in_symtab(info, tmpsymname, NULL); + if (!tmpsym) + continue; + drv->opt_vals[i] = get_sym_value(info, tmpsym); + } + + memset(tmpsymname, 0, 128); + sprintf(tmpsymname, "__%s_pci_tbl_export", drv->name); + + tmpsym = find_sym_in_symtab(info, tmpsymname, NULL); + + + /* + * If this returns NULL, then this is a PMD_VDEV, because + * it has no pci table reference + */ + if (!tmpsym) { + drv->pci_tbl = NULL; + return 0; + } + + tname = get_sym_value(info, tmpsym); + tmpsym = find_sym_in_symtab(info, tname, NULL); + if (!tmpsym) + return -ENOENT; + + drv->pci_tbl = (struct rte_pci_id *)get_sym_value(info, tmpsym); + if (!drv->pci_tbl) + return -ENOENT; + + return 0; +} + +static int locate_pmd_entries(struct elf_info *info) +{ + Elf_Sym *last = NULL; + struct pmd_driver *new; + + info->drivers = NULL; + + do { + new = calloc(sizeof(struct pmd_driver), 1); + new->name_sym = find_sym_in_symtab(info, "this_pmd_name", last); + last = new->name_sym; + if (!new->name_sym) + free(new); + else { + if (complete_pmd_entry(info, new)) { + fprintf(stderr, + "Failed to complete pmd entry\n"); + free(new); + } else { + new->next = info->drivers; + info->drivers = new; + } + } + } while (last); + + return 0; +} + +static void output_pmd_info_string(struct elf_info *info, char *outfile) +{ + FILE *ofd; + struct pmd_driver *drv; + struct rte_pci_id *pci_ids; + int idx = 0; + + ofd = fopen(outfile, "w+"); + if (!ofd) { + fprintf(stderr, "Unable to open output file\n"); + return; + } + + drv = info->drivers; + + while (drv) { + fprintf(ofd, "const char %s_pmd_info[] __attribute__((used)) = " + "\"PMD_INFO_STRING= {", + drv->name); + fprintf(ofd, "\\\"name\\\" : \\\"%s\\\", ", drv->name); + + for (idx = 0; idx < PMD_OPT_MAX; idx++) { + if 
(drv->opt_vals[idx]) + fprintf(ofd, "\\\"%s\\\" : \\\"%s\\\", ", + opt_tags[idx].json_id, + drv->opt_vals[idx]); + } + + pci_ids = drv->pci_tbl; + fprintf(ofd, "\\\"pci_ids\\\" : ["); + + while (pci_ids && pci_ids->device_id) { + fprintf(ofd, "[%d, %d, %d, %d]", + pci_ids->vendor_id, pci_ids->device_id, + pci_ids->subsystem_vendor_id, + pci_ids->subsystem_device_id); + pci_ids++; + if (pci_ids->device_id) + fprintf(ofd, ","); + else + fprintf(ofd, " "); + } + fprintf(ofd, "]}\";"); + drv = drv->next; + } + + fclose(ofd); +} + +int main(int argc, char **argv) +{ + struct elf_info info; + int rc = 1; + + if (argc < 3) { + fprintf(stderr, + "usage: %s <object file> <c output file>\n", + basename(argv[0])); + exit(127); + } + parse_elf(&info, argv[1]); + + locate_pmd_entries(&info); + + if (info.drivers) { + output_pmd_info_string(&info, argv[2]); + rc = 0; + } else { + fprintf(stderr, "No drivers registered\n"); + } + + parse_elf_finish(&info); + exit(rc); +} diff --git a/buildtools/pmdinfogen/pmdinfogen.h b/buildtools/pmdinfogen/pmdinfogen.h new file mode 100644 index 00000000..1da2966f --- /dev/null +++ b/buildtools/pmdinfogen/pmdinfogen.h @@ -0,0 +1,120 @@ + +/* Postprocess pmd object files to export hw support + * + * Copyright 2016 Neil Horman <nhorman@tuxdriver.com> + * Based in part on modpost.c from the linux kernel + * + * This software may be used and distributed according to the terms + * of the GNU General Public License V2, incorporated herein by reference. + * + */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <string.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <sys/mman.h> +#include <fcntl.h> +#include <unistd.h> +#include <elf.h> +#include <rte_config.h> +#include <rte_pci.h> +#include <rte_byteorder.h> + +/* On BSD-alike OSes elf.h defines these according to host's word size */ +#undef ELF_ST_BIND +#undef ELF_ST_TYPE +#undef ELF_R_SYM +#undef ELF_R_TYPE + +/* + * Define ELF64_* to ELF_*, the latter being defined in both 32 and 64 bit + * flavors in elf.h. This makes our code a bit more generic between arches + * and allows us to support 32 bit code in the future should we ever want to + */ +#ifdef RTE_ARCH_64 +#define Elf_Ehdr Elf64_Ehdr +#define Elf_Shdr Elf64_Shdr +#define Elf_Sym Elf64_Sym +#define Elf_Addr Elf64_Addr +#define Elf_Sword Elf64_Sxword +#define Elf_Section Elf64_Half +#define ELF_ST_BIND ELF64_ST_BIND +#define ELF_ST_TYPE ELF64_ST_TYPE + +#define Elf_Rel Elf64_Rel +#define Elf_Rela Elf64_Rela +#define ELF_R_SYM ELF64_R_SYM +#define ELF_R_TYPE ELF64_R_TYPE +#else +#define Elf_Ehdr Elf32_Ehdr +#define Elf_Shdr Elf32_Shdr +#define Elf_Sym Elf32_Sym +#define Elf_Addr Elf32_Addr +#define Elf_Sword Elf32_Sxword +#define Elf_Section Elf32_Half +#define ELF_ST_BIND ELF32_ST_BIND +#define ELF_ST_TYPE ELF32_ST_TYPE + +#define Elf_Rel Elf32_Rel +#define Elf_Rela Elf32_Rela +#define ELF_R_SYM ELF32_R_SYM +#define ELF_R_TYPE ELF32_R_TYPE +#endif + + +/* + * Note, it seems odd that we have both a CONVERT_NATIVE and a TO_NATIVE macro + * below. We do this because the values passed to TO_NATIVE may themselves be + * macros and need both macros here to get expanded. 
Specifically its the width + * variable we are concerned with, because it needs to get expanded prior to + * string concatenation + */ +#define CONVERT_NATIVE(fend, width, x) ({ \ +typeof(x) ___x; \ +if ((fend) == ELFDATA2LSB) \ + ___x = rte_le_to_cpu_##width(x); \ +else \ + ___x = rte_be_to_cpu_##width(x); \ + ___x; \ +}) + +#define TO_NATIVE(fend, width, x) CONVERT_NATIVE(fend, width, x) + +enum opt_params { + PMD_PARAM_STRING = 0, + PMD_OPT_MAX +}; + +struct pmd_driver { + Elf_Sym *name_sym; + const char *name; + struct rte_pci_id *pci_tbl; + struct pmd_driver *next; + + const char *opt_vals[PMD_OPT_MAX]; +}; + +struct elf_info { + unsigned long size; + Elf_Ehdr *hdr; + Elf_Shdr *sechdrs; + Elf_Sym *symtab_start; + Elf_Sym *symtab_stop; + char *strtab; + + /* support for 32bit section numbers */ + + unsigned int num_sections; /* max_secindex + 1 */ + unsigned int secindex_strings; + /* if Nth symbol table entry has .st_shndx = SHN_XINDEX, + * take shndx from symtab_shndx_start[N] instead + */ + Elf32_Word *symtab_shndx_start; + Elf32_Word *symtab_shndx_stop; + + struct pmd_driver *drivers; +}; + diff --git a/config/common_base b/config/common_base index cfee8257..78305356 100644 --- a/config/common_base +++ b/config/common_base @@ -282,6 +282,11 @@ CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DRIVER=n CONFIG_RTE_LIBRTE_VIRTIO_DEBUG_DUMP=n # +# Compile virtio device emulation inside virtio PMD driver +# +CONFIG_RTE_VIRTIO_USER=n + +# # Compile burst-oriented VMXNET3 PMD driver # CONFIG_RTE_LIBRTE_VMXNET3_PMD=y diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index 5e7f024d..2284a53b 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -45,7 +45,6 @@ There are many libraries, so their headers may be grouped by topics: [vhost] (@ref rte_virtio_net.h), [KNI] (@ref rte_kni.h), [PCI] (@ref rte_pci.h), - [PCI IDs] (@ref rte_pci_dev_ids.h) - **memory**: [memseg] (@ref rte_memory.h), diff --git a/doc/guides/cryptodevs/kasumi.rst b/doc/guides/cryptodevs/kasumi.rst index d6b3a975..7346b217 100644 --- a/doc/guides/cryptodevs/kasumi.rst +++ b/doc/guides/cryptodevs/kasumi.rst @@ -52,6 +52,9 @@ Limitations * Chained mbufs are not supported. * KASUMI(F9) supported only if hash offset field is byte-aligned. +* In-place bit-level operations for KASUMI(F8) are not supported + (if length and/or offset of data to be ciphered is not byte-aligned). + Installation ------------ diff --git a/doc/guides/freebsd_gsg/build_dpdk.rst b/doc/guides/freebsd_gsg/build_dpdk.rst index 1d92c089..93c43661 100644 --- a/doc/guides/freebsd_gsg/build_dpdk.rst +++ b/doc/guides/freebsd_gsg/build_dpdk.rst @@ -183,7 +183,7 @@ contains the kernel modules to install: ls x86_64-native-bsdapp-gcc - app build hostapp include kmod lib Makefile + app build include kmod lib Makefile .. 
_loading_contigmem: diff --git a/doc/guides/linux_gsg/build_dpdk.rst b/doc/guides/linux_gsg/build_dpdk.rst index 198c0b6f..fb2c481d 100644 --- a/doc/guides/linux_gsg/build_dpdk.rst +++ b/doc/guides/linux_gsg/build_dpdk.rst @@ -152,7 +152,7 @@ A kmod directory is also present that contains kernel modules which may be load ls x86_64-native-linuxapp-gcc - app build hostapp include kmod lib Makefile + app build include kmod lib Makefile Loading Modules to Enable Userspace IO for DPDK ----------------------------------------------- diff --git a/doc/guides/linux_gsg/enable_func.rst b/doc/guides/linux_gsg/enable_func.rst index ec0e04d9..04e066c9 100644 --- a/doc/guides/linux_gsg/enable_func.rst +++ b/doc/guides/linux_gsg/enable_func.rst @@ -183,8 +183,8 @@ High Performance of Small Packets on 40G NIC As there might be firmware fixes for performance enhancement in latest version of firmware image, the firmware update might be needed for getting high performance. Check with the local Intel's Network Division application engineers for firmware updates. -The base driver to support firmware version of FVL3E will be integrated in the next -DPDK release, so currently the validated firmware version is 4.2.6. +Users should consult the release notes specific to a DPDK release to identify +the validated firmware version for a NIC using the i40e driver. Use 16 Bytes RX Descriptor Size ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/guides/nics/overview.rst b/doc/guides/nics/overview.rst index a23eb5cc..572ced41 100644 --- a/doc/guides/nics/overview.rst +++ b/doc/guides/nics/overview.rst @@ -128,7 +128,7 @@ Most of these differences are summarized below. Packet type parsing Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Timesync Y Y Y Y Y Basic stats Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Extended stats Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y + Extended stats Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Stats per queue Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y EEPROM dump Y Y Y Y Registers dump Y Y Y Y Y Y Y Y diff --git a/doc/guides/nics/virtio.rst b/doc/guides/nics/virtio.rst index 06ca433a..c6335d40 100644 --- a/doc/guides/nics/virtio.rst +++ b/doc/guides/nics/virtio.rst @@ -73,7 +73,7 @@ In this release, the virtio PMD driver provides the basic functionality of packe * It supports multicast packets and promiscuous mode. -* The descriptor number for the RX/TX queue is hard-coded to be 256 by qemu. +* The descriptor number for the Rx/Tx queue is hard-coded to be 256 by qemu. If given a different descriptor number by the upper application, the virtio PMD generates a warning and fall back to the hard-coded value. @@ -163,8 +163,9 @@ Host2VM communication example which means received packets come from vEth0, and transmitted packets is sent to vEth0. #. In the guest, bind the virtio device to the uio_pci_generic kernel module and start the forwarding application. - When the virtio port in guest bursts rx, it is getting packets from the raw socket's receive queue. - When the virtio port bursts tx, it is sending packet to the tx_q. + When the virtio port in guest bursts Rx, it is getting packets from the + raw socket's receive queue. + When the virtio port bursts Tx, it is sending packet to the tx_q. .. 
code-block:: console @@ -183,7 +184,9 @@ Host2VM communication example The packet reception and transmission flow path is: - IXIA packet generator->82599 PF->KNI rx queue->KNI raw socket queue->Guest VM virtio port 0 rx burst->Guest VM virtio port 0 tx burst-> KNI tx queue->82599 PF-> IXIA packet generator + IXIA packet generator->82599 PF->KNI Rx queue->KNI raw socket queue->Guest + VM virtio port 0 Rx burst->Guest VM virtio port 0 Tx burst-> KNI Tx queue + ->82599 PF-> IXIA packet generator Virtio with qemu virtio Back End -------------------------------- @@ -206,8 +209,68 @@ Virtio with qemu virtio Back End In this example, the packet reception flow path is: - IXIA packet generator->82599 PF->Linux Bridge->TAP0's socket queue-> Guest VM virtio port 0 rx burst-> Guest VM 82599 VF port1 tx burst-> IXIA packet generator + IXIA packet generator->82599 PF->Linux Bridge->TAP0's socket queue-> Guest + VM virtio port 0 Rx burst-> Guest VM 82599 VF port1 Tx burst-> IXIA packet + generator The packet transmission flow is: - IXIA packet generator-> Guest VM 82599 VF port1 rx burst-> Guest VM virtio port 0 tx burst-> tap -> Linux Bridge->82599 PF-> IXIA packet generator + IXIA packet generator-> Guest VM 82599 VF port1 Rx burst-> Guest VM virtio + port 0 Tx burst-> tap -> Linux Bridge->82599 PF-> IXIA packet generator + + +Virtio PMD Rx/Tx Callbacks +-------------------------- + +Virtio driver has 3 Rx callbacks and 2 Tx callbacks. + +Rx callbacks: + +#. ``virtio_recv_pkts``: + Regular version without mergeable Rx buffer support. + +#. ``virtio_recv_mergeable_pkts``: + Regular version with mergeable Rx buffer support. + +#. ``virtio_recv_pkts_vec``: + Vector version without mergeable Rx buffer support, also fixes the available + ring indexes and uses vector instructions to optimize performance. + +Tx callbacks: + +#. ``virtio_xmit_pkts``: + Regular version. + +#. ``virtio_xmit_pkts_simple``: + Vector version fixes the available ring indexes to optimize performance. + + +By default, the non-vector callbacks are used: + +* For Rx: If mergeable Rx buffers is disabled then ``virtio_recv_pkts`` is + used; otherwise ``virtio_recv_mergeable_pkts``. + +* For Tx: ``virtio_xmit_pkts``. + + +Vector callbacks will be used when: + +* ``txq_flags`` is set to ``VIRTIO_SIMPLE_FLAGS`` (0xF01), which implies: + + * Single segment is specified. + + * No offload support is needed. + +* Mergeable Rx buffers is disabled. + +The corresponding callbacks are: + +* For Rx: ``virtio_recv_pkts_vec``. + +* For Tx: ``virtio_xmit_pkts_simple``. + + +Example of using the vector version of the virtio poll mode driver in +``testpmd``:: + + testpmd -c 0x7 -n 4 -- -i --txqflags=0xF01 --rxq=1 --txq=1 --nb-cores=1 diff --git a/doc/guides/prog_guide/dev_kit_build_system.rst b/doc/guides/prog_guide/dev_kit_build_system.rst index 3e89eaec..18a30104 100644 --- a/doc/guides/prog_guide/dev_kit_build_system.rst +++ b/doc/guides/prog_guide/dev_kit_build_system.rst @@ -70,7 +70,7 @@ Each build directory contains include files, libraries, and applications: ... ~/DEV/DPDK$ ls i686-native-linuxapp-gcc - app build hostapp include kmod lib Makefile + app build buildtools include kmod lib Makefile ~/DEV/DPDK$ ls i686-native-linuxapp-gcc/app/ @@ -264,7 +264,7 @@ These Makefiles generate a binary application. 
* rte.extapp.mk: External application -* rte.hostapp.mk: Host application in the development kit framework +* rte.hostapp.mk: prerequisite tool to build dpdk Library ^^^^^^^ @@ -304,6 +304,44 @@ Misc * rte.subdir.mk: Build several directories in the development kit framework. +.. _Internally_Generated_Build_Tools: + +Internally Generated Build Tools +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``app/pmdinfogen`` + + +``pmdinfogen`` scans an object (.o) file for various well known symbol names. These +well known symbol names are defined by various macros and used to export +important information about hardware support and usage for pmd files. For +instance the macro: + +.. code-block:: c + + PMD_REGISTER_DRIVER(drv, name) + +Creates the following symbol: + +.. code-block:: c + + static char this_pmd_name0[] __attribute__((used)) = "<name>"; + + +Which pmdinfogen scans for. Using this information other relevant bits of data +can be exported from the object file and used to produce a hardware support +description, that pmdinfogen then encodes into a json formatted string in the +following format: + +.. code-block:: c + + static char <name_pmd_string>="PMD_INFO_STRING=\"{'name' : '<name>', ...}\""; + + +These strings can then be searched for by external tools to determine the +hardware support of a given library or application. + + .. _Useful_Variables_Provided_by_the_Build_System: Useful Variables Provided by the Build System diff --git a/doc/guides/rel_notes/release_16_07.rst b/doc/guides/rel_notes/release_16_07.rst index 569f562f..e2af1474 100644 --- a/doc/guides/rel_notes/release_16_07.rst +++ b/doc/guides/rel_notes/release_16_07.rst @@ -195,6 +195,21 @@ Resolved Issues EAL ~~~ +* **igb_uio: Fixed possible mmap failure for Linux >= 4.5.** + + mmaping the iomem range of the PCI device fails for kernels that + enabled CONFIG_IO_STRICT_DEVMEM option: + + EAL: pci_map_resource(): + cannot mmap(39, 0x7f1c51800000, 0x100000, 0x0): + Invalid argument (0xffffffffffffffff) + + CONFIG_IO_STRICT_DEVMEM is introduced in Linux v4.5 + + Updated igb_uio to stop reserving PCI memory resources, from + kernel point of view iomem region looks like idle and mmap worked + again. This matches uio_pci_generic usage. + Drivers ~~~~~~~ diff --git a/doc/guides/sample_app_ug/l3_forward.rst b/doc/guides/sample_app_ug/l3_forward.rst index 491f99d9..c885cdb7 100644 --- a/doc/guides/sample_app_ug/l3_forward.rst +++ b/doc/guides/sample_app_ug/l3_forward.rst @@ -88,32 +88,46 @@ To compile the application: Running the Application ----------------------- -The application has a number of command line options: +The application has a number of command line options:: -.. code-block:: console + ./l3fwd [EAL options] -- -p PORTMASK + [-P] + [-E] + [-L] + --config(port,queue,lcore)[,(port,queue,lcore)] + [--eth-dest=X,MM:MM:MM:MM:MM:MM] + [--enable-jumbo [--max-pkt-len PKTLEN]] + [--no-numa] + [--hash-entry-num] + [--ipv6] + [--parse-ptype] + +Where, + +* ``-p PORTMASK:`` Hexadecimal bitmask of ports to configure - ./build/l3fwd [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa][--hash-entry-num][--ipv6] [--parse-ptype] +* ``-P:`` Optional, sets all ports to promiscuous mode so that packets are accepted regardless of the packet's Ethernet MAC destination address. + Without this option, only packets with the Ethernet MAC destination address set to the Ethernet address of the port are accepted. -where, +* ``-E:`` Optional, enable exact match. 
-* -p PORTMASK: Hexadecimal bitmask of ports to configure +* ``-L:`` Optional, enable longest prefix match. -* -P: optional, sets all ports to promiscuous mode so that packets are accepted regardless of the packet's Ethernet MAC destination address. - Without this option, only packets with the Ethernet MAC destination address set to the Ethernet address of the port are accepted. +* ``--config (port,queue,lcore)[,(port,queue,lcore)]:`` Determines which queues from which ports are mapped to which cores. -* --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues from which ports are mapped to which cores +* ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X. -* --enable-jumbo: optional, enables jumbo frames +* ``--enable-jumbo:`` Optional, enables jumbo frames. -* --max-pkt-len: optional, maximum packet length in decimal (64-9600) +* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600). -* --no-numa: optional, disables numa awareness +* ``--no-numa:`` Optional, disables numa awareness. -* --hash-entry-num: optional, specifies the hash entry number in hexadecimal to be setup +* ``--hash-entry-num:`` Optional, specifies the hash entry number in hexadecimal to be setup. -* --ipv6: optional, set it if running ipv6 packets +* ``--ipv6:`` Optional, set if running ipv6 packets. -* --parse-ptype: optional, set it if use software way to analyze packet type +* ``--parse-ptype:`` Optional, set to use software to analyze packet type. Without this option, hardware will check the packet type. For example, consider a dual processor socket platform where cores 0-7 and 16-23 appear on socket 0, while cores 8-15 and 24-31 appear on socket 1. Let's say that the programmer wants to use memory from both NUMA nodes, the platform has only two ports, one connected to each NUMA node, diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c index 2987ef6b..dc0b0337 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c @@ -57,7 +57,7 @@ create_unique_device_name(char *name, size_t size) if (name == NULL) return -EINVAL; - ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD, + ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), unique_name_id++); if (ret < 0) return ret; @@ -515,10 +515,13 @@ aesni_gcm_uninit(const char *name) } static struct rte_driver aesni_gcm_pmd_drv = { - .name = CRYPTODEV_NAME_AESNI_GCM_PMD, .type = PMD_VDEV, .init = aesni_gcm_init, .uninit = aesni_gcm_uninit }; -PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv); +PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv, CRYPTODEV_NAME_AESNI_GCM_PMD); +DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD, + "max_nb_queue_pairs=<int> " + "max_nb_sessions=<int> " + "socket_id=<int>"); diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h index a42f9414..9878d6e4 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h @@ -37,18 +37,18 @@ #define GCM_LOG_ERR(fmt, args...) \ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_GCM_PMD, \ + RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ __func__, __LINE__, ## args) #ifdef RTE_LIBRTE_AESNI_MB_DEBUG #define GCM_LOG_INFO(fmt, args...) 
\ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_GCM_PMD, \ + RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ __func__, __LINE__, ## args) #define GCM_LOG_DBG(fmt, args...) \ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_GCM_PMD, \ + RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ __func__, __LINE__, ## args) #else #define GCM_LOG_INFO(fmt, args...) diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c index 6554fc4e..b2d0c8ca 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c @@ -54,7 +54,7 @@ create_unique_device_name(char *name, size_t size) if (name == NULL) return -EINVAL; - ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_MB_PMD, + ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), unique_name_id++); if (ret < 0) return ret; @@ -715,10 +715,13 @@ cryptodev_aesni_mb_uninit(const char *name) } static struct rte_driver cryptodev_aesni_mb_pmd_drv = { - .name = CRYPTODEV_NAME_AESNI_MB_PMD, .type = PMD_VDEV, .init = cryptodev_aesni_mb_init, .uninit = cryptodev_aesni_mb_uninit }; -PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv); +PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv, CRYPTODEV_NAME_AESNI_MB_PMD); +DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD, + "max_nb_queue_pairs=<int> " + "max_nb_sessions=<int> " + "socket_id=<int>"); diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h index 949d9a60..17f367f4 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h @@ -37,7 +37,7 @@ #define MB_LOG_ERR(fmt, args...) \ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_MB_PMD, \ + RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \ __func__, __LINE__, ## args) #ifdef RTE_LIBRTE_AESNI_MB_DEBUG diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c index 5f8c7a2e..4e217434 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd.c +++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c @@ -61,7 +61,7 @@ create_unique_device_name(char *name, size_t size) if (name == NULL) return -EINVAL; - ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_KASUMI_PMD, + ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), unique_name_id++); if (ret < 0) return ret; @@ -243,9 +243,12 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op, offset_in_bits = op->sym->cipher.data.offset; src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); - dst = op->sym->m_dst ? 
- rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) : - rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); + if (op->sym->m_dst == NULL) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + KASUMI_LOG_ERR("bit-level in-place not supported\n"); + return 0; + } + dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); IV = *((uint64_t *)(op->sym->cipher.iv.data)); length_in_bits = op->sym->cipher.data.length; @@ -648,10 +651,13 @@ cryptodev_kasumi_uninit(const char *name) } static struct rte_driver cryptodev_kasumi_pmd_drv = { - .name = CRYPTODEV_NAME_KASUMI_PMD, .type = PMD_VDEV, .init = cryptodev_kasumi_init, .uninit = cryptodev_kasumi_uninit }; -PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv); +PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv, CRYPTODEV_NAME_KASUMI_PMD); +DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD, + "max_nb_queue_pairs=<int> " + "max_nb_sessions=<int> " + "socket_id=<int>"); diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c index da5854eb..b9285a43 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c @@ -57,8 +57,8 @@ static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = { .increment = 0 }, .aad_size = { - .min = 9, - .max = 9, + .min = 8, + .max = 8, .increment = 0 } }, } diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h index 04e1c437..fb586caa 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h @@ -37,18 +37,18 @@ #define KASUMI_LOG_ERR(fmt, args...) \ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_KASUMI_PMD, \ + RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ __func__, __LINE__, ## args) #ifdef RTE_LIBRTE_KASUMI_DEBUG #define KASUMI_LOG_INFO(fmt, args...) \ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_KASUMI_PMD, \ + RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ __func__, __LINE__, ## args) #define KASUMI_LOG_DBG(fmt, args...) \ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_KASUMI_PMD, \ + RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ __func__, __LINE__, ## args) #else #define KASUMI_LOG_INFO(fmt, args...) diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c index bdaf13ca..909b04f9 100644 --- a/drivers/crypto/null/null_crypto_pmd.c +++ b/drivers/crypto/null/null_crypto_pmd.c @@ -51,7 +51,7 @@ create_unique_device_name(char *name, size_t size) if (name == NULL) return -EINVAL; - ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_NULL_PMD, + ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_NULL_PMD), unique_name_id++); if (ret < 0) return ret; @@ -269,10 +269,13 @@ cryptodev_null_uninit(const char *name) } static struct rte_driver cryptodev_null_pmd_drv = { - .name = CRYPTODEV_NAME_NULL_PMD, .type = PMD_VDEV, .init = cryptodev_null_init, .uninit = cryptodev_null_uninit }; -PMD_REGISTER_DRIVER(cryptodev_null_pmd_drv); +PMD_REGISTER_DRIVER(cryptodev_null_pmd_drv, CRYPTODEV_NAME_NULL_PMD); +DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD, + "max_nb_queue_pairs=<int> " + "max_nb_sessions=<int> " + "socket_id=<int>"); diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h index 2a4c739c..acebc973 100644 --- a/drivers/crypto/null/null_crypto_pmd_private.h +++ b/drivers/crypto/null/null_crypto_pmd_private.h @@ -37,18 +37,18 @@ #define NULL_CRYPTO_LOG_ERR(fmt, args...) 
\ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_NULL_PMD, \ + RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ __func__, __LINE__, ## args) #ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG #define NULL_CRYPTO_LOG_INFO(fmt, args...) \ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_NULL_PMD, \ + RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ __func__, __LINE__, ## args) #define NULL_CRYPTO_LOG_DBG(fmt, args...) \ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_NULL_PMD, \ + RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ __func__, __LINE__, ## args) #else #define NULL_CRYPTO_LOG_INFO(fmt, args...) diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h index b47dbc23..243c1b40 100644 --- a/drivers/crypto/qat/qat_adf/qat_algs.h +++ b/drivers/crypto/qat/qat_adf/qat_algs.h @@ -112,7 +112,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, uint8_t *authkey, uint32_t authkeylen, uint32_t add_auth_data_length, - uint32_t digestsize); + uint32_t digestsize, + unsigned int operation); void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header); diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c index aa108d47..185bb334 100644 --- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c +++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c @@ -51,6 +51,7 @@ #include <rte_byteorder.h> #include <rte_log.h> #include <rte_malloc.h> +#include <rte_crypto_sym.h> #include "../qat_logs.h" #include "qat_algs.h" @@ -502,7 +503,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, uint8_t *authkey, uint32_t authkeylen, uint32_t add_auth_data_length, - uint32_t digestsize) + uint32_t digestsize, + unsigned int operation) { struct icp_qat_hw_cipher_algo_blk *cipher; struct icp_qat_hw_auth_algo_blk *hash; @@ -654,6 +656,12 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, ICP_QAT_FW_LA_NO_CMP_AUTH_RES); } + if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) { + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_CMP_AUTH_RES); + } /* Cipher CD config setup */ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 940b2b63..d51ca968 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -560,14 +560,16 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, cipher_xform->key.data, cipher_xform->key.length, auth_xform->add_auth_data_length, - auth_xform->digest_length)) + auth_xform->digest_length, + auth_xform->op)) goto error_out; } else { if (qat_alg_aead_session_create_content_desc_auth(session, auth_xform->key.data, auth_xform->key.length, auth_xform->add_auth_data_length, - auth_xform->digest_length)) + auth_xform->digest_length, + auth_xform->op)) goto error_out; } return session; diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c index f46ec857..82ab047f 100644 --- a/drivers/crypto/qat/rte_qat_cryptodev.c +++ b/drivers/crypto/qat/rte_qat_cryptodev.c @@ -114,7 +114,6 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_ static struct rte_cryptodev_driver rte_qat_pmd = { { - .name = "rte_qat_pmd", .id_table = pci_id_qat_map, 
.drv_flags = RTE_PCI_DRV_NEED_MAPPING, }, @@ -134,4 +133,6 @@ static struct rte_driver pmd_qat_drv = { .init = rte_qat_pmd_init, }; -PMD_REGISTER_DRIVER(pmd_qat_drv); +PMD_REGISTER_DRIVER(pmd_qat_drv, CRYPTODEV_NAME_QAT_SYM_PMD); +DRIVER_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map); + diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c index dc8de6bd..87cd070a 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c @@ -60,7 +60,7 @@ create_unique_device_name(char *name, size_t size) if (name == NULL) return -EINVAL; - ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_SNOW3G_PMD, + ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), unique_name_id++); if (ret < 0) return ret; @@ -639,10 +639,13 @@ cryptodev_snow3g_uninit(const char *name) } static struct rte_driver cryptodev_snow3g_pmd_drv = { - .name = CRYPTODEV_NAME_SNOW3G_PMD, .type = PMD_VDEV, .init = cryptodev_snow3g_init, .uninit = cryptodev_snow3g_uninit }; -PMD_REGISTER_DRIVER(cryptodev_snow3g_pmd_drv); +PMD_REGISTER_DRIVER(cryptodev_snow3g_pmd_drv, CRYPTODEV_NAME_SNOW3G_PMD); +DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD, + "max_nb_queue_pairs=<int> " + "max_nb_sessions=<int> " + "socket_id=<int>"); diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h index b383cbcb..03973b97 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h +++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h @@ -37,18 +37,18 @@ #define SNOW3G_LOG_ERR(fmt, args...) \ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_SNOW3G_PMD, \ + RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ __func__, __LINE__, ## args) #ifdef RTE_LIBRTE_SNOW3G_DEBUG #define SNOW3G_LOG_INFO(fmt, args...) \ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_SNOW3G_PMD, \ + RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ __func__, __LINE__, ## args) #define SNOW3G_LOG_DBG(fmt, args...) \ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_SNOW3G_PMD, \ + RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ __func__, __LINE__, ## args) #else #define SNOW3G_LOG_INFO(fmt, args...) 
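The crypto PMD hunks above all follow one registration rework: the driver name leaves the .name field of struct rte_driver and is passed to PMD_REGISTER_DRIVER() itself, the accepted devargs are declared with DRIVER_REGISTER_PARAM_STRING() (or the PCI ID table with DRIVER_REGISTER_PCI_TABLE(), as in the QAT hunk above), and because the CRYPTODEV_NAME_*_PMD macros are now bare tokens rather than string literals, snprintf()/RTE_LOG() call sites wrap them in RTE_STR(). A minimal sketch of the new pattern, assuming a hypothetical "crypto_mycrypto" virtual device driver (the name, init/uninit bodies and parameter list are illustrative, not part of this commit):

/* Hypothetical vdev crypto PMD registered with the reworked macros. */
#include <rte_common.h>	/* RTE_STR(), __rte_unused */
#include <rte_dev.h>	/* struct rte_driver, PMD_REGISTER_DRIVER */
#include <rte_log.h>

/* Bare token, in the same style as CRYPTODEV_NAME_AESNI_MB_PMD above. */
#define CRYPTODEV_NAME_MYCRYPTO_PMD crypto_mycrypto

static int
mycrypto_init(const char *name __rte_unused, const char *params)
{
	/* The token must be stringified before it can be printed. */
	RTE_LOG(INFO, CRYPTODEV, "[%s] probing, args: %s\n",
		RTE_STR(CRYPTODEV_NAME_MYCRYPTO_PMD),
		params ? params : "");
	return 0;
}

static int
mycrypto_uninit(const char *name __rte_unused)
{
	return 0;
}

static struct rte_driver mycrypto_pmd_drv = {
	/* No .name member any more; the name goes into the macro below. */
	.type = PMD_VDEV,
	.init = mycrypto_init,
	.uninit = mycrypto_uninit,
};

PMD_REGISTER_DRIVER(mycrypto_pmd_drv, CRYPTODEV_NAME_MYCRYPTO_PMD);
DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MYCRYPTO_PMD,
	"max_nb_queue_pairs=<int> "
	"max_nb_sessions=<int> "
	"socket_id=<int>");

The net PMDs that follow apply the same change, with PCI drivers exporting their ID tables instead of a parameter string.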
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c index 2d7f3448..f7955662 100644 --- a/drivers/net/af_packet/rte_eth_af_packet.c +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -890,10 +890,15 @@ rte_pmd_af_packet_devuninit(const char *name) } static struct rte_driver pmd_af_packet_drv = { - .name = "eth_af_packet", .type = PMD_VDEV, .init = rte_pmd_af_packet_devinit, .uninit = rte_pmd_af_packet_devuninit, }; -PMD_REGISTER_DRIVER(pmd_af_packet_drv); +PMD_REGISTER_DRIVER(pmd_af_packet_drv, eth_af_packet); +DRIVER_REGISTER_PARAM_STRING(eth_af_packet, + "iface=<string> " + "qpairs=<int> " + "blocksz=<int> " + "framesz=<int> " + "framecnt=<int>"); diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile index c2ddd8d7..ab696801 100644 --- a/drivers/net/bnx2x/Makefile +++ b/drivers/net/bnx2x/Makefile @@ -31,7 +31,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_DEBUG_PERIODIC) += debug.c # this lib depends upon: -DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_eal lib/librte_ether lib/librte_hash +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_mempool lib/librte_mbuf include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c index 3095d2bb..95fbad8d 100644 --- a/drivers/net/bnx2x/bnx2x.c +++ b/drivers/net/bnx2x/bnx2x.c @@ -22,7 +22,6 @@ #include "ecore_init_ops.h" #include "rte_version.h" -#include "rte_pci_dev_ids.h" #include <sys/types.h> #include <sys/stat.h> @@ -9572,7 +9571,7 @@ void bnx2x_load_firmware(struct bnx2x_softc *sc) int f; struct stat st; - fwname = sc->devinfo.device_id == BNX2X_DEV_ID_57711 + fwname = sc->devinfo.device_id == CHIP_NUM_57711 ? FW_NAME_57711 : FW_NAME_57810; f = open(fwname, O_RDONLY); if (f < 0) { @@ -9682,9 +9681,6 @@ int bnx2x_attach(struct bnx2x_softc *sc) sc->state = BNX2X_STATE_CLOSED; - /* Init RTE stuff */ - bnx2x_init_rte(sc); - pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); sc->igu_base_addr = IS_VF(sc) ? 
PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM; @@ -9702,6 +9698,9 @@ int bnx2x_attach(struct bnx2x_softc *sc) sc->igu_sb_cnt = 1; } + /* Init RTE stuff */ + bnx2x_init_rte(sc); + if (IS_PF(sc)) { /* get device info and set params */ if (bnx2x_get_device_info(sc) != 0) { diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index c24a5308..78757a8d 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -1906,14 +1906,6 @@ pci_find_cap(struct bnx2x_softc *sc, uint8_t id, uint8_t type) return NULL; } -static inline int is_valid_ether_addr(uint8_t *addr) -{ - if (!(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5])) - return 0; - else - return 1; -} - static inline void bnx2x_set_rx_mode(struct bnx2x_softc *sc) { diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c index 3ff57c42..c8d2bf2e 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.c +++ b/drivers/net/bnx2x/bnx2x_ethdev.c @@ -16,18 +16,67 @@ /* * The set of PCI devices this driver supports */ +#define BROADCOM_PCI_VENDOR_ID 0x14E4 static struct rte_pci_id pci_id_bnx2x_map[] = { -#define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) }, +#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) }, +#endif { .vendor_id = 0, } }; static struct rte_pci_id pci_id_bnx2xvf_map[] = { -#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) }, + { RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) }, { .vendor_id = 0, } }; +struct rte_bnx2x_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint32_t offset_hi; + uint32_t offset_lo; +}; + +static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = { + {"rx_buffer_drops", + offsetof(struct bnx2x_eth_stats, brb_drop_hi), + offsetof(struct bnx2x_eth_stats, brb_drop_lo)}, + {"rx_buffer_truncates", + offsetof(struct bnx2x_eth_stats, brb_truncate_hi), + offsetof(struct bnx2x_eth_stats, brb_truncate_lo)}, + {"rx_buffer_truncate_discard", + offsetof(struct bnx2x_eth_stats, brb_truncate_discard), + offsetof(struct bnx2x_eth_stats, brb_truncate_discard)}, + {"mac_filter_discard", + offsetof(struct bnx2x_eth_stats, mac_filter_discard), + offsetof(struct bnx2x_eth_stats, mac_filter_discard)}, + {"no_match_vlan_tag_discard", + offsetof(struct bnx2x_eth_stats, mf_tag_discard), + offsetof(struct bnx2x_eth_stats, mf_tag_discard)}, + {"tx_pause", + offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi), + offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)}, + {"rx_pause", + offsetof(struct bnx2x_eth_stats, pause_frames_received_hi), + offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)}, + {"tx_priority_flow_control", + 
offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi), + offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)}, + {"rx_priority_flow_control", + offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi), + offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)} +}; + static void bnx2x_link_update(struct rte_eth_dev *dev) { @@ -334,6 +383,52 @@ bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) brb_truncate_discard + stats->rx_nombuf; } +static int +bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings); + + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", + bnx2x_xstats_strings[i].name); + + return stat_cnt; +} + +static int +bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct bnx2x_softc *sc = dev->data->dev_private; + unsigned int num = RTE_DIM(bnx2x_xstats_strings); + + if (n < num) + return num; + + bnx2x_stats_handle(sc, STATS_EVENT_UPDATE); + + for (num = 0; num < n; num++) { + if (bnx2x_xstats_strings[num].offset_hi != + bnx2x_xstats_strings[num].offset_lo) + xstats[num].value = HILO_U64( + *(uint32_t *)((char *)&sc->eth_stats + + bnx2x_xstats_strings[num].offset_hi), + *(uint32_t *)((char *)&sc->eth_stats + + bnx2x_xstats_strings[num].offset_lo)); + else + xstats[num].value = + *(uint64_t *)((char *)&sc->eth_stats + + bnx2x_xstats_strings[num].offset_lo); + } + + return num; +} + static void bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info) { @@ -376,6 +471,8 @@ static const struct eth_dev_ops bnx2x_eth_dev_ops = { .allmulticast_disable = bnx2x_dev_allmulticast_disable, .link_update = bnx2x_dev_link_update, .stats_get = bnx2x_dev_stats_get, + .xstats_get = bnx2x_dev_xstats_get, + .xstats_get_names = bnx2x_get_xstats_names, .dev_infos_get = bnx2x_dev_infos_get, .rx_queue_setup = bnx2x_dev_rx_queue_setup, .rx_queue_release = bnx2x_dev_rx_queue_release, @@ -399,6 +496,8 @@ static const struct eth_dev_ops bnx2xvf_eth_dev_ops = { .allmulticast_disable = bnx2x_dev_allmulticast_disable, .link_update = bnx2xvf_dev_link_update, .stats_get = bnx2x_dev_stats_get, + .xstats_get = bnx2x_dev_xstats_get, + .xstats_get_names = bnx2x_get_xstats_names, .dev_infos_get = bnx2x_dev_infos_get, .rx_queue_setup = bnx2x_dev_rx_queue_setup, .rx_queue_release = bnx2x_dev_rx_queue_release, @@ -566,5 +665,7 @@ static struct rte_driver rte_bnx2xvf_driver = { .init = rte_bnx2xvf_pmd_init, }; -PMD_REGISTER_DRIVER(rte_bnx2x_driver); -PMD_REGISTER_DRIVER(rte_bnx2xvf_driver); +PMD_REGISTER_DRIVER(rte_bnx2x_driver, bnx2x); +DRIVER_REGISTER_PCI_TABLE(bnx2x, pci_id_bnx2x_map); +PMD_REGISTER_DRIVER(rte_bnx2xvf_driver, bnx2xvf); +DRIVER_REGISTER_PCI_TABLE(bnx2xvf, pci_id_bnx2xvf_map); diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c index 14b1d10a..1c895f88 100644 --- a/drivers/net/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/bnx2x/bnx2x_vfpf.c @@ -293,10 +293,11 @@ int bnx2x_vf_get_resources(struct bnx2x_softc *sc, uint8_t tx_count, uint8_t rx_ sc->igu_sb_cnt, sc->igu_base_sb); strncpy(sc->fw_ver, sc_resp.fw_ver, sizeof(sc->fw_ver)); - if (is_valid_ether_addr(sc_resp.resc.current_mac_addr)) - (void)rte_memcpy(sc->link_params.mac_addr, - sc_resp.resc.current_mac_addr, - ETH_ALEN); + if (is_valid_assigned_ether_addr(&sc_resp.resc.current_mac_addr)) + 
ether_addr_copy(&sc_resp.resc.current_mac_addr, + (struct ether_addr *)sc->link_params.mac_addr); + else + eth_random_addr(sc->link_params.mac_addr); return 0; } diff --git a/drivers/net/bnx2x/bnx2x_vfpf.h b/drivers/net/bnx2x/bnx2x_vfpf.h index 966240cc..f854d81b 100644 --- a/drivers/net/bnx2x/bnx2x_vfpf.h +++ b/drivers/net/bnx2x/bnx2x_vfpf.h @@ -114,7 +114,7 @@ struct vf_resc { uint8_t num_vlan_filters; uint8_t num_mc_filters; uint8_t permanent_mac_addr[ETH_ALEN]; - uint8_t current_mac_addr[ETH_ALEN]; + struct ether_addr current_mac_addr; uint16_t pf_link_speed; uint32_t pf_link_supported; }; diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 406e38a6..3795facd 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -56,10 +56,31 @@ static const char bnxt_version[] = "Broadcom Cumulus driver " DRV_MODULE_NAME "\n"; +#define PCI_VENDOR_ID_BROADCOM 0x14E4 + +#define BROADCOM_DEV_ID_57301 0x16c8 +#define BROADCOM_DEV_ID_57302 0x16c9 +#define BROADCOM_DEV_ID_57304_PF 0x16ca +#define BROADCOM_DEV_ID_57304_VF 0x16cb +#define BROADCOM_DEV_ID_57402 0x16d0 +#define BROADCOM_DEV_ID_57404 0x16d1 +#define BROADCOM_DEV_ID_57406_PF 0x16d2 +#define BROADCOM_DEV_ID_57406_VF 0x16d3 +#define BROADCOM_DEV_ID_57406_MF 0x16d4 +#define BROADCOM_DEV_ID_57314 0x16df + static struct rte_pci_id bnxt_pci_id_map[] = { -#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" - {.device_id = 0}, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) }, + { .vendor_id = 0, /* sentinel */ }, }; #define BNXT_ETH_RSS_SUPPORT ( \ @@ -1041,9 +1062,9 @@ static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused) } static struct rte_driver bnxt_pmd_drv = { - .name = "eth_bnxt", .type = PMD_PDEV, .init = bnxt_rte_pmd_init, }; -PMD_REGISTER_DRIVER(bnxt_pmd_drv); +PMD_REGISTER_DRIVER(bnxt_pmd_drv, bnxt); +DRIVER_REGISTER_PCI_TABLE(bnxt, bnxt_pci_id_map); diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index 5d81a60d..2ed4c2f1 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -177,8 +177,7 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS; if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; - req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST | - HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | + req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | mask); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index 48a50e4e..2f7ae70c 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -1068,7 +1068,7 @@ 
bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4, } static void -bond_mode_8023ad_setup_v1604(struct rte_eth_dev *dev, +bond_mode_8023ad_setup_v20(struct rte_eth_dev *dev, struct rte_eth_bond_8023ad_conf *conf) { struct rte_eth_bond_8023ad_conf def_conf; @@ -1214,7 +1214,7 @@ free_out: } int -rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id, +rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf) { struct rte_eth_dev *bond_dev; @@ -1229,7 +1229,7 @@ rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id, bond_mode_8023ad_conf_get(bond_dev, conf); return 0; } -VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1604, 16.04); +VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v20, 2.0); int rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id, @@ -1278,7 +1278,7 @@ bond_8023ad_setup_validate(uint8_t port_id, } int -rte_eth_bond_8023ad_setup_v1604(uint8_t port_id, +rte_eth_bond_8023ad_setup_v20(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf) { struct rte_eth_dev *bond_dev; @@ -1289,11 +1289,11 @@ rte_eth_bond_8023ad_setup_v1604(uint8_t port_id, return err; bond_dev = &rte_eth_devices[port_id]; - bond_mode_8023ad_setup_v1604(bond_dev, conf); + bond_mode_8023ad_setup_v20(bond_dev, conf); return 0; } -VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1604, 16.04); +VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v20, 2.0); int rte_eth_bond_8023ad_setup_v1607(uint8_t port_id, diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h index 1de34bc8..6b8ff575 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.h +++ b/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -188,7 +188,7 @@ int rte_eth_bond_8023ad_conf_get(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf); int -rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id, +rte_eth_bond_8023ad_conf_get_v20(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf); int rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id, @@ -209,7 +209,7 @@ int rte_eth_bond_8023ad_setup(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf); int -rte_eth_bond_8023ad_setup_v1604(uint8_t port_id, +rte_eth_bond_8023ad_setup_v20(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf); int rte_eth_bond_8023ad_setup_v1607(uint8_t port_id, diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index 9a2518fb..b20a2729 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -2509,10 +2509,20 @@ bond_ethdev_configure(struct rte_eth_dev *dev) } static struct rte_driver bond_drv = { - .name = "eth_bond", .type = PMD_VDEV, .init = bond_init, .uninit = bond_uninit, }; -PMD_REGISTER_DRIVER(bond_drv); +PMD_REGISTER_DRIVER(bond_drv, eth_bond); + +DRIVER_REGISTER_PARAM_STRING(eth_bond, + "slave=<ifc> " + "primary=<ifc> " + "mode=[0-6] " + "xmit_policy=[l2 | l23 | l34] " + "socket_id=<int> " + "mac=<mac addr> " + "lsc_poll_period_ms=<int> " + "up_delay=<int> " + "down_delay=<int>"); diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c index 6c130ed2..9208a615 100644 --- a/drivers/net/cxgbe/cxgbe_ethdev.c +++ b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -934,10 +934,17 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev, struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); struct adapter *adapter = pi->adapter; - regs->length = cxgbe_get_regs_len(eth_dev); regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) | - (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) | - (1 << 16); + 
(CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) | + (1 << 16); + + if (regs->data == NULL) { + regs->length = cxgbe_get_regs_len(eth_dev); + regs->width = sizeof(uint32_t); + + return 0; + } + t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t))); return 0; @@ -971,7 +978,6 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { .get_eeprom_length = cxgbe_get_eeprom_length, .get_eeprom = cxgbe_get_eeprom, .set_eeprom = cxgbe_set_eeprom, - .get_reg_length = cxgbe_get_regs_len, .get_reg = cxgbe_get_regs, }; @@ -1056,9 +1062,10 @@ static int rte_cxgbe_pmd_init(const char *name __rte_unused, } static struct rte_driver rte_cxgbe_driver = { - .name = "cxgbe_driver", .type = PMD_PDEV, .init = rte_cxgbe_pmd_init, }; -PMD_REGISTER_DRIVER(rte_cxgbe_driver); +PMD_REGISTER_DRIVER(rte_cxgbe_driver, cxgb4); +DRIVER_REGISTER_PCI_TABLE(cxgb4, cxgb4_pci_tbl); + diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h index e8bf8dad..6c25c8da 100644 --- a/drivers/net/e1000/e1000_ethdev.h +++ b/drivers/net/e1000/e1000_ethdev.h @@ -35,6 +35,8 @@ #define _E1000_ETHDEV_H_ #include <rte_time.h> +#define E1000_INTEL_VENDOR_ID 0x8086 + /* need update link, bit flag */ #define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) #define E1000_FLAG_MAILBOX (uint32_t)(1 << 1) diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c index 653be092..ad104ed7 100644 --- a/drivers/net/e1000/em_ethdev.c +++ b/drivers/net/e1000/em_ethdev.c @@ -137,11 +137,38 @@ static enum e1000_fc_mode em_fc_setting = e1000_fc_full; * The set of PCI devices this driver supports */ static const struct rte_pci_id pci_id_em_map[] = { - -#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" - -{0}, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82540EM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82545EM_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82546EB_QUAD_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_DUAL) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_SERDES_QUAD) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571PT_QUAD_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_COPPER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_FIBER) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI_SERDES) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82572EI) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82573L) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574L) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82574LA) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82583V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_LM) }, + { 
RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPT_I217_V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_LM) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LPTLP_I218_V) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) }, + { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) }, + { .vendor_id = 0, /* sentinel */ }, }; static const struct eth_dev_ops eth_em_ops = { @@ -1777,4 +1804,5 @@ struct rte_driver em_pmd_drv = { .init = rte_em_pmd_init, }; -PMD_REGISTER_DRIVER(em_pmd_drv); +PMD_REGISTER_DRIVER(em_pmd_drv, em); +DRIVER_REGISTER_PCI_TABLE(em, pci_id_em_map); diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index 5067d208..fbf4d090 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -386,7 +386,6 @@ static const struct eth_dev_ops eth_igb_ops = { .timesync_disable = igb_timesync_disable, .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp, .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp, - .get_reg_length = eth_igb_get_reg_length, .get_reg = eth_igb_get_regs, .get_eeprom_length = eth_igb_get_eeprom_length, .get_eeprom = eth_igb_get_eeprom, @@ -426,7 +425,6 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = { .rxq_info_get = igb_rxq_info_get, .txq_info_get = igb_txq_info_get, .mac_addr_set = igbvf_default_mac_addr_set, - .get_reg_length = igbvf_get_reg_length, .get_reg = igbvf_get_regs, }; @@ -4945,6 +4943,12 @@ eth_igb_get_regs(struct rte_eth_dev *dev, int count = 0; const struct reg_info *reg_group; + if (data == NULL) { + regs->length = eth_igb_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) { @@ -4969,6 +4973,12 @@ igbvf_get_regs(struct rte_eth_dev *dev, int count = 0; const struct reg_info *reg_group; + if (data == NULL) { + regs->length = igbvf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)igbvf_get_reg_length(dev))) { @@ -5210,5 +5220,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev) E1000_WRITE_FLUSH(hw); } -PMD_REGISTER_DRIVER(pmd_igb_drv); -PMD_REGISTER_DRIVER(pmd_igbvf_drv); +PMD_REGISTER_DRIVER(pmd_igb_drv, igb); +DRIVER_REGISTER_PCI_TABLE(igb, pci_id_igb_map); +PMD_REGISTER_DRIVER(pmd_igbvf_drv, igbvf); +DRIVER_REGISTER_PCI_TABLE(igbvf, pci_id_igbvf_map); diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c index a21a9513..a3649d8b 100644 --- a/drivers/net/ena/base/ena_com.c +++ b/drivers/net/ena/base/ena_com.c @@ -42,9 +42,6 @@ #define ENA_ASYNC_QUEUE_DEPTH 4 #define ENA_ADMIN_QUEUE_DEPTH 32 -#define ENA_EXTENDED_STAT_GET_FUNCT(_funct_queue) (_funct_queue & 0xFFFF) -#define ENA_EXTENDED_STAT_GET_QUEUE(_funct_queue) (_funct_queue >> 16) - #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \ | (ENA_COMMON_SPEC_VERSION_MINOR)) @@ -201,12 +198,16 @@ static inline void comp_ctxt_release(struct ena_com_admin_queue *queue, static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue, u16 command_id, bool capture) { - ENA_ASSERT(command_id < queue->q_depth, - "command id is larger than the queue size. 
cmd_id: %u queue size %d\n", - command_id, queue->q_depth); + if (unlikely(command_id >= queue->q_depth)) { + ena_trc_err("command id is larger than the queue size. cmd_id: %u queue size %d\n", + command_id, queue->q_depth); + return NULL; + } - ENA_ASSERT(!(queue->comp_ctx[command_id].occupied && capture), - "Completion context is occupied"); + if (unlikely(queue->comp_ctx[command_id].occupied && capture)) { + ena_trc_err("Completion context is occupied\n"); + return NULL; + } if (capture) { ATOMIC32_INC(&queue->outstanding_cmds); @@ -290,7 +291,8 @@ static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue) for (i = 0; i < queue->q_depth; i++) { comp_ctx = get_comp_ctxt(queue, i, false); - ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); + if (comp_ctx) + ENA_WAIT_EVENT_INIT(comp_ctx->wait_event); } return 0; @@ -315,15 +317,21 @@ ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue, cmd_size_in_bytes, comp, comp_size_in_bytes); + if (unlikely(IS_ERR(comp_ctx))) + admin_queue->running_state = false; ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags); return comp_ctx; } static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, struct ena_com_io_sq *io_sq) { size_t size; + int dev_node; + + ENA_TOUCH(ctx); memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); @@ -334,15 +342,29 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, size = io_sq->desc_entry_size * io_sq->q_depth; - if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) - ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, - size, - io_sq->desc_addr.virt_addr, - io_sq->desc_addr.phys_addr, - io_sq->desc_addr.mem_handle); - else - io_sq->desc_addr.virt_addr = - ENA_MEM_ALLOC(ena_dev->dmadev, size); + if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) { + ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + ctx->numa_node, + dev_node); + if (!io_sq->desc_addr.virt_addr) + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + io_sq->desc_addr.phys_addr, + io_sq->desc_addr.mem_handle); + } else { + ENA_MEM_ALLOC_NODE(ena_dev->dmadev, + size, + io_sq->desc_addr.virt_addr, + ctx->numa_node, + dev_node); + if (!io_sq->desc_addr.virt_addr) + io_sq->desc_addr.virt_addr = + ENA_MEM_ALLOC(ena_dev->dmadev, size); + } if (!io_sq->desc_addr.virt_addr) { ena_trc_err("memory allocation failed"); @@ -357,10 +379,13 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, } static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx, struct ena_com_io_cq *io_cq) { size_t size; + int prev_node; + ENA_TOUCH(ctx); memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); /* Use the basic completion descriptor for Rx */ @@ -371,11 +396,18 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth; - ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, - size, - io_cq->cdesc_addr.virt_addr, - io_cq->cdesc_addr.phys_addr, - io_cq->cdesc_addr.mem_handle); + ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + ctx->numa_node, + prev_node); + if (!io_cq->cdesc_addr.virt_addr) + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, + size, + io_cq->cdesc_addr.virt_addr, + io_cq->cdesc_addr.phys_addr, + io_cq->cdesc_addr.mem_handle); if (!io_cq->cdesc_addr.virt_addr) { ena_trc_err("memory allocation failed"); @@ -399,6 +431,11 @@ 
ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue, ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK; comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false); + if (unlikely(!comp_ctx)) { + ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n"); + admin_queue->running_state = false; + return; + } comp_ctx->status = ENA_CMD_COMPLETED; comp_ctx->comp_status = cqe->acq_common_descriptor.status; @@ -615,10 +652,12 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) goto err; } - ENA_ASSERT(read_resp->reg_off == offset, - "Invalid MMIO read return value"); - - ret = read_resp->reg_val; + if (read_resp->reg_off != offset) { + ena_trc_err("reading failed for wrong offset value"); + ret = ENA_MMIO_READ_TIMEOUT; + } else { + ret = read_resp->reg_val; + } err: ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags); @@ -838,7 +877,7 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev) return 0; } -static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) +static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; @@ -849,7 +888,6 @@ static int ena_com_hash_key_destroy(struct ena_com_dev *ena_dev) rss->hash_key_dma_addr, rss->hash_key_mem_handle); rss->hash_key = NULL; - return 0; } static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) @@ -862,10 +900,13 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev) rss->hash_ctrl_dma_addr, rss->hash_ctrl_mem_handle); + if (unlikely(!rss->hash_ctrl)) + return ENA_COM_NO_MEM; + return 0; } -static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) +static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; @@ -876,8 +917,6 @@ static int ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev) rss->hash_ctrl_dma_addr, rss->hash_ctrl_mem_handle); rss->hash_ctrl = NULL; - - return 0; } static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, @@ -902,7 +941,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, return ENA_COM_INVAL; } - tbl_size = (1 << log_size) * + tbl_size = (1ULL << log_size) * sizeof(struct ena_admin_rss_ind_table_entry); ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, @@ -913,7 +952,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, if (unlikely(!rss->rss_ind_tbl)) goto mem_err1; - tbl_size = (1 << log_size) * sizeof(u16); + tbl_size = (1ULL << log_size) * sizeof(u16); rss->host_rss_ind_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size); if (unlikely(!rss->host_rss_ind_tbl)) @@ -924,7 +963,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev, return 0; mem_err2: - tbl_size = (1 << log_size) * + tbl_size = (1ULL << log_size) * sizeof(struct ena_admin_rss_ind_table_entry); ENA_MEM_FREE_COHERENT(ena_dev->dmadev, @@ -938,10 +977,10 @@ mem_err1: return ENA_COM_NO_MEM; } -static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) +static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) { struct ena_rss *rss = &ena_dev->rss; - size_t tbl_size = (1 << rss->tbl_log_size) * + size_t tbl_size = (1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); if (rss->rss_ind_tbl) @@ -955,8 +994,6 @@ static int ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev) if (rss->host_rss_ind_tbl) ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl); rss->host_rss_ind_tbl = NULL; - - return 0; } static int ena_com_create_io_sq(struct ena_com_dev 
*ena_dev, @@ -1059,17 +1096,18 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev) static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev) { - u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { -1 }; + u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 }; struct ena_rss *rss = &ena_dev->rss; - u16 idx, i; + u8 idx; + u16 i; for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++) dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i; for (i = 0; i < 1 << rss->tbl_log_size; i++) { - idx = rss->rss_ind_tbl[i].cq_idx; - if (idx > ENA_TOTAL_NUM_QUEUES) + if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES) return ENA_COM_INVAL; + idx = (u8)rss->rss_ind_tbl[i].cq_idx; if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES) return ENA_COM_INVAL; @@ -1097,7 +1135,7 @@ static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev) static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev, - unsigned int intr_delay_resolution) + u16 intr_delay_resolution) { struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; unsigned int i; @@ -1189,23 +1227,19 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, } io_cq->idx = cmd_completion.cq_idx; - io_cq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + - cmd_completion.cq_doorbell_offset); - - if (io_cq->q_depth != cmd_completion.cq_actual_depth) { - ena_trc_err("completion actual queue size (%d) is differ from requested size (%d)\n", - cmd_completion.cq_actual_depth, io_cq->q_depth); - ena_com_destroy_io_cq(ena_dev, io_cq); - return ENA_COM_NO_SPACE; - } io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + - cmd_completion.cq_interrupt_unmask_register); + cmd_completion.cq_interrupt_unmask_register_offset); - if (cmd_completion.cq_head_db_offset) + if (cmd_completion.cq_head_db_register_offset) io_cq->cq_head_db_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + - cmd_completion.cq_head_db_offset); + cmd_completion.cq_head_db_register_offset); + + if (cmd_completion.numa_node_register_offset) + io_cq->numa_node_cfg_reg = + (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + + cmd_completion.numa_node_register_offset); ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth); @@ -1239,6 +1273,9 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev) for (i = 0; i < admin_queue->q_depth; i++) { comp_ctx = get_comp_ctxt(admin_queue, i, false); + if (unlikely(!comp_ctx)) + break; + comp_ctx->status = ENA_CMD_ABORTED; ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event); @@ -1304,7 +1341,7 @@ void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev) { u16 depth = ena_dev->aenq.q_depth; - ENA_ASSERT(ena_dev->aenq.head == depth, "Invliad AENQ state\n"); + ENA_ASSERT(ena_dev->aenq.head == depth, "Invalid AENQ state\n"); /* Init head_db to mark that all entries in the queue * are initially available @@ -1556,7 +1593,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev, if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) { ena_trc_err("Device isn't ready, abort com init\n"); - return -1; + return ENA_COM_NO_DEVICE; } admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH; @@ -1631,50 +1668,46 @@ error: } int ena_com_create_io_queue(struct ena_com_dev *ena_dev, - u16 qid, - enum queue_direction direction, - enum ena_admin_placement_policy_type mem_queue_type, - u32 msix_vector, - u16 queue_size) + struct ena_com_create_io_ctx *ctx) { struct ena_com_io_sq *io_sq; struct ena_com_io_cq *io_cq; int ret = 0; - if (qid >= ENA_TOTAL_NUM_QUEUES) { + if (ctx->qid >= 
ENA_TOTAL_NUM_QUEUES) { ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n", - qid, ENA_TOTAL_NUM_QUEUES); + ctx->qid, ENA_TOTAL_NUM_QUEUES); return ENA_COM_INVAL; } - io_sq = &ena_dev->io_sq_queues[qid]; - io_cq = &ena_dev->io_cq_queues[qid]; + io_sq = &ena_dev->io_sq_queues[ctx->qid]; + io_cq = &ena_dev->io_cq_queues[ctx->qid]; memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); /* Init CQ */ - io_cq->q_depth = queue_size; - io_cq->direction = direction; - io_cq->qid = qid; + io_cq->q_depth = ctx->queue_size; + io_cq->direction = ctx->direction; + io_cq->qid = ctx->qid; - io_cq->msix_vector = msix_vector; + io_cq->msix_vector = ctx->msix_vector; - io_sq->q_depth = queue_size; - io_sq->direction = direction; - io_sq->qid = qid; + io_sq->q_depth = ctx->queue_size; + io_sq->direction = ctx->direction; + io_sq->qid = ctx->qid; - io_sq->mem_queue_type = mem_queue_type; + io_sq->mem_queue_type = ctx->mem_queue_type; - if (direction == ENA_COM_IO_QUEUE_DIRECTION_TX) + if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) /* header length is limited to 8 bits */ io_sq->tx_max_header_size = - ENA_MIN16(ena_dev->tx_max_header_size, SZ_256); + ENA_MIN32(ena_dev->tx_max_header_size, SZ_256); - ret = ena_com_init_io_sq(ena_dev, io_sq); + ret = ena_com_init_io_sq(ena_dev, ctx, io_sq); if (ret) goto error; - ret = ena_com_init_io_cq(ena_dev, io_cq); + ret = ena_com_init_io_cq(ena_dev, ctx, io_cq); if (ret) goto error; @@ -1840,22 +1873,6 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) + ENA_REGS_AENQ_HEAD_DB_OFF); } -/* Sets the function Idx and Queue Idx to be used for - * get full statistics feature - */ -int ena_com_extended_stats_set_func_queue(struct ena_com_dev *ena_dev, - u32 func_queue) -{ - /* Function & Queue is acquired from user in the following format : - * Bottom Half word: funct - * Top Half Word: queue - */ - ena_dev->stats_func = ENA_EXTENDED_STAT_GET_FUNCT(func_queue); - ena_dev->stats_queue = ENA_EXTENDED_STAT_GET_QUEUE(func_queue); - - return 0; -} - int ena_com_dev_reset(struct ena_com_dev *ena_dev) { u32 stat, timeout, cap, reset_val; @@ -2195,7 +2212,7 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev, *func = rss->hash_func; if (key) - memcpy(key, hash_key->key, hash_key->keys_num << 2); + memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); return 0; } @@ -2337,7 +2354,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev, u16 supported_fields; int rc; - if (proto > ENA_ADMIN_RSS_PROTO_NUM) { + if (proto >= ENA_ADMIN_RSS_PROTO_NUM) { ena_trc_err("Invalid proto num (%u)\n", proto); return ENA_COM_INVAL; } @@ -2420,7 +2437,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) return ret; } - cmd.control_buffer.length = (1 << rss->tbl_log_size) * + cmd.control_buffer.length = (1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); ret = ena_com_execute_admin_command(admin_queue, @@ -2444,7 +2461,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl) u32 tbl_size; int i, rc; - tbl_size = (1 << rss->tbl_log_size) * + tbl_size = (1ULL << rss->tbl_log_size) * sizeof(struct ena_admin_rss_ind_table_entry); rc = ena_com_get_feature_ex(ena_dev, &get_resp, @@ -2496,22 +2513,18 @@ err_indr_tbl: return rc; } -int ena_com_rss_destroy(struct ena_com_dev *ena_dev) +void ena_com_rss_destroy(struct ena_com_dev *ena_dev) { ena_com_indirect_table_destroy(ena_dev); ena_com_hash_key_destroy(ena_dev); ena_com_hash_ctrl_destroy(ena_dev); 
memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss)); - - return 0; } -int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev, - u32 debug_area_size) +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; - int rc; ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, SZ_4K, @@ -2521,33 +2534,29 @@ int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev, if (unlikely(!host_attr->host_info)) return ENA_COM_NO_MEM; - if (debug_area_size) { + return 0; +} + +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size) { + struct ena_host_attribute *host_attr = &ena_dev->host_attr; + ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev, debug_area_size, host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr, host_attr->debug_area_dma_handle); if (unlikely(!host_attr->debug_area_virt_addr)) { - rc = ENA_COM_NO_MEM; - goto err; - } + host_attr->debug_area_size = 0; + return ENA_COM_NO_MEM; } host_attr->debug_area_size = debug_area_size; return 0; -err: - - ENA_MEM_FREE_COHERENT(ena_dev->dmadev, - SZ_4K, - host_attr->host_info, - host_attr->host_info_dma_addr, - host_attr->host_info_dma_handle); - host_attr->host_info = NULL; - return rc; } -void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev) +void ena_com_delete_host_info(struct ena_com_dev *ena_dev) { struct ena_host_attribute *host_attr = &ena_dev->host_attr; @@ -2559,6 +2568,11 @@ void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev) host_attr->host_info_dma_handle); host_attr->host_info = NULL; } +} + +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev) +{ + struct ena_host_attribute *host_attr = &ena_dev->host_attr; if (host_attr->debug_area_virt_addr) { ENA_MEM_FREE_COHERENT(ena_dev->dmadev, @@ -2677,7 +2691,7 @@ void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) { struct ena_admin_get_feat_resp get_resp; - u32 delay_resolution; + u16 delay_resolution; int rc; rc = ena_com_get_feature(ena_dev, &get_resp, diff --git a/drivers/net/ena/base/ena_com.h b/drivers/net/ena/base/ena_com.h index 19e53ffb..e5345926 100644 --- a/drivers/net/ena/base/ena_com.h +++ b/drivers/net/ena/base/ena_com.h @@ -120,8 +120,8 @@ struct ena_com_rx_buf_info { }; struct ena_com_io_desc_addr { - void __iomem *pbuf_dev_addr; /* LLQ address */ - void *virt_addr; + u8 __iomem *pbuf_dev_addr; /* LLQ address */ + u8 *virt_addr; dma_addr_t phys_addr; ena_mem_handle_t mem_handle; }; @@ -138,13 +138,14 @@ struct ena_com_tx_meta { struct ena_com_io_cq { struct ena_com_io_desc_addr cdesc_addr; - u32 __iomem *db_addr; - /* Interrupt unmask register */ u32 __iomem *unmask_reg; /* The completion queue head doorbell register */ - uint32_t __iomem *cq_head_db_reg; + u32 __iomem *cq_head_db_reg; + + /* numa configuration register (for TPH) */ + u32 __iomem *numa_node_cfg_reg; /* The value to write to the above register to unmask * the interrupt of this queue @@ -189,7 +190,7 @@ struct ena_com_io_sq { u16 idx; u16 tail; u16 next_to_comp; - u16 tx_max_header_size; + u32 tx_max_header_size; u8 phase; u8 desc_entry_size; u8 dma_addr_bits; @@ -312,17 +313,15 @@ struct ena_com_dev { struct ena_com_aenq aenq; struct ena_com_io_cq io_cq_queues[ENA_TOTAL_NUM_QUEUES]; struct ena_com_io_sq io_sq_queues[ENA_TOTAL_NUM_QUEUES]; - void __iomem *reg_bar; + u8 __iomem *reg_bar; void __iomem *mem_bar; void *dmadev; enum ena_admin_placement_policy_type tx_mem_queue_type; - + u32 tx_max_header_size; u16 
stats_func; /* Selected function for extended statistic dump */ u16 stats_queue; /* Selected queue for extended statistic dump */ - u16 tx_max_header_size; - struct ena_com_mmio_read mmio_read; struct ena_rss rss; @@ -343,6 +342,15 @@ struct ena_com_dev_get_features_ctx { struct ena_admin_feature_offload_desc offload; }; +struct ena_com_create_io_ctx { + enum ena_admin_placement_policy_type mem_queue_type; + enum queue_direction direction; + int numa_node; + u32 msix_vector; + u16 queue_size; + u16 qid; +}; + typedef void (*ena_aenq_handler)(void *data, struct ena_admin_aenq_entry *aenq_e); @@ -420,22 +428,14 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev); /* ena_com_create_io_queue - Create io queue. * @ena_dev: ENA communication layer struct - * @qid - the caller virtual queue id. - * @direction - the queue direction (Rx/Tx) - * @mem_queue_type - Indicate if this queue is LLQ or regular queue - * (relevant only for Tx queue) - * @msix_vector - MSI-X vector - * @queue_size - queue size + * ena_com_create_io_ctx - create context structure * - * Create the submission and the completion queues for queue id - qid. + * Create the submission and the completion queues. * * @return - 0 on success, negative value on failure. */ -int ena_com_create_io_queue(struct ena_com_dev *ena_dev, u16 qid, - enum queue_direction direction, - enum ena_admin_placement_policy_type mem_queue_type, - u32 msix_vector, - u16 queue_size); +int ena_com_create_io_queue(struct ena_com_dev *ena_dev, + struct ena_com_create_io_ctx *ctx); /* ena_com_admin_destroy - Destroy IO queue with the queue id - qid. * @ena_dev: ENA communication layer struct @@ -519,7 +519,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data); * @ena_dev: ENA communication layer struct * * This method aborts all the outstanding admin commands. - * The called should then call ena_com_wait_for_abort_completion to make sure + * The caller should then call ena_com_wait_for_abort_completion to make sure * all the commands were completed. */ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev); @@ -628,10 +628,8 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size); * @ena_dev: ENA communication layer struct * * Free all the RSS/RFS resources. - * - * @return: 0 on Success and negative value otherwise. */ -int ena_com_rss_destroy(struct ena_com_dev *ena_dev); +void ena_com_rss_destroy(struct ena_com_dev *ena_dev); /* ena_com_fill_hash_function - Fill RSS hash function * @ena_dev: ENA communication layer struct @@ -774,26 +772,38 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev); */ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl); -/* ena_com_allocate_host_attribute - Allocate host attributes resources. +/* ena_com_allocate_host_info - Allocate host info resources. * @ena_dev: ENA communication layer struct - * @debug_area_size: Debug aread size * - * Allocate host info and debug area. + * @return: 0 on Success and negative value otherwise. + */ +int ena_com_allocate_host_info(struct ena_com_dev *ena_dev); + +/* ena_com_allocate_debug_area - Allocate debug area. + * @ena_dev: ENA communication layer struct + * @debug_area_size - debug area size. * * @return: 0 on Success and negative value otherwise. */ -int ena_com_allocate_host_attribute(struct ena_com_dev *ena_dev, - u32 debug_area_size); +int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev, + u32 debug_area_size); + +/* ena_com_delete_debug_area - Free the debug area resources. 
+ * @ena_dev: ENA communication layer struct + * + * Free the allocate debug area. + */ +void ena_com_delete_debug_area(struct ena_com_dev *ena_dev); -/* ena_com_allocate_host_attribute - Free the host attributes resources. +/* ena_com_delete_host_info - Free the host info resources. * @ena_dev: ENA communication layer struct * - * Free the allocate host info and debug area. + * Free the allocate host info. */ -void ena_com_delete_host_attribute(struct ena_com_dev *ena_dev); +void ena_com_delete_host_info(struct ena_com_dev *ena_dev); /* ena_com_set_host_attributes - Update the device with the host - * attributes base address. + * attributes (debug area and host info) base address. * @ena_dev: ENA communication layer struct * * @return: 0 on Success and negative value otherwise. @@ -979,7 +989,7 @@ ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev, */ return; - curr_moder_idx = (enum ena_intr_moder_level)*moder_tbl_idx; + curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx); if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) { ena_trc_err("Wrong moderation index %u\n", curr_moder_idx); return; diff --git a/drivers/net/ena/base/ena_defs/ena_admin_defs.h b/drivers/net/ena/base/ena_defs/ena_admin_defs.h index fe412469..7a031d90 100644 --- a/drivers/net/ena/base/ena_defs/ena_admin_defs.h +++ b/drivers/net/ena/base/ena_defs/ena_admin_defs.h @@ -58,30 +58,6 @@ enum ena_admin_aq_opcode { ENA_ADMIN_GET_STATS = 11, }; -/* privileged amdin commands opcodes */ -enum ena_admin_aq_opcode_privileged { - /* get device capabilities */ - ENA_ADMIN_IDENTIFY = 48, - - /* configure device */ - ENA_ADMIN_CONFIGURE_PF_DEVICE = 49, - - /* setup SRIOV PCIe Virtual Function capabilities */ - ENA_ADMIN_SETUP_VF = 50, - - /* load firmware to the controller */ - ENA_ADMIN_LOAD_FIRMWARE = 52, - - /* commit previously loaded firmare */ - ENA_ADMIN_COMMIT_FIRMWARE = 53, - - /* quiesce virtual function */ - ENA_ADMIN_QUIESCE_VF = 54, - - /* load virtual function from migrates context */ - ENA_ADMIN_MIGRATE_VF = 55, -}; - /* admin command completion status codes */ enum ena_admin_aq_completion_status { /* Request completed successfully */ @@ -116,25 +92,6 @@ enum ena_admin_aq_feature_id { /* max number of supported queues per for every queues type */ ENA_ADMIN_MAX_QUEUES_NUM = 2, - /* low latency queues capabilities (max entry size, depth) */ - ENA_ADMIN_LLQ_CONFIG = 3, - - /* power management capabilities */ - ENA_ADMIN_POWER_MANAGEMENT_CONFIG = 4, - - /* MAC address filters support, multicast, broadcast, and - * promiscuous - */ - ENA_ADMIN_MAC_FILTERS_CONFIG = 5, - - /* VLAN membership, frame format, etc. 
*/ - ENA_ADMIN_VLAN_CONFIG = 6, - - /* Available size for various on-chip memory resources, accessible - * by the driver - */ - ENA_ADMIN_ON_DEVICE_MEMORY_CONFIG = 7, - /* Receive Side Scaling (RSS) function */ ENA_ADMIN_RSS_HASH_FUNCTION = 10, @@ -150,20 +107,9 @@ enum ena_admin_aq_feature_id { /* Receive Side Scaling (RSS) hash input */ ENA_ADMIN_RSS_HASH_INPUT = 18, - /* overlay tunnels configuration */ - ENA_ADMIN_TUNNEL_CONFIG = 19, - /* interrupt moderation parameters */ ENA_ADMIN_INTERRUPT_MODERATION = 20, - /* 1588v2 and Timing configuration */ - ENA_ADMIN_1588_CONFIG = 21, - - /* Packet Header format templates configuration for input and - * output parsers - */ - ENA_ADMIN_PKT_HEADER_TEMPLATES_CONFIG = 23, - /* AENQ configuration */ ENA_ADMIN_AENQ_CONFIG = 26, @@ -440,9 +386,7 @@ struct ena_admin_acq_create_sq_resp_desc { uint16_t reserved; - /* word 3 : queue doorbell address as and offset to PCIe MMIO REG - * BAR - */ + /* word 3 : queue doorbell address as an offset to PCIe MMIO REG BAR */ uint32_t sq_doorbell_offset; /* word 4 : low latency queue ring base address as an offset to @@ -520,18 +464,18 @@ struct ena_admin_acq_create_cq_resp_desc { /* actual cq depth in # of entries */ uint16_t cq_actual_depth; - /* word 3 : doorbell address as an offset to PCIe MMIO REG BAR */ - uint32_t cq_doorbell_offset; + /* word 3 : cpu numa node address as an offset to PCIe MMIO REG BAR */ + uint32_t numa_node_register_offset; /* word 4 : completion head doorbell address as an offset to PCIe * MMIO REG BAR */ - uint32_t cq_head_db_offset; + uint32_t cq_head_db_register_offset; /* word 5 : interrupt unmask register address as an offset into * PCIe MMIO REG BAR */ - uint32_t cq_interrupt_unmask_register; + uint32_t cq_interrupt_unmask_register_offset; }; /* ENA AQ Destroy Completion Queue command. Placed in control buffer @@ -724,7 +668,7 @@ struct ena_admin_queue_feature_desc { /* ENA MTU Set Feature descriptor. 
*/ struct ena_admin_set_feature_mtu_desc { - /* word 0 : mtu size including L2 */ + /* word 0 : mtu payload size (exclude L2) */ uint32_t mtu; }; @@ -913,10 +857,7 @@ struct ena_admin_proto_input { /* flow hash fields (bitwise according to ena_admin_flow_hash_fields) */ uint16_t fields; - /* 0 : inner - for tunneled packet, select the fields - * from inner header - */ - uint16_t flags; + uint16_t reserved2; }; /* ENA RSS hash control buffer structure */ @@ -927,11 +868,9 @@ struct ena_admin_feature_rss_hash_control { /* selected input fields */ struct ena_admin_proto_input selected_fields[ENA_ADMIN_RSS_PROTO_NUM]; - /* supported input fields for inner header */ - struct ena_admin_proto_input supported_inner_fields[ENA_ADMIN_RSS_PROTO_NUM]; + struct ena_admin_proto_input reserved2[ENA_ADMIN_RSS_PROTO_NUM]; - /* selected input fields */ - struct ena_admin_proto_input selected_inner_fields[ENA_ADMIN_RSS_PROTO_NUM]; + struct ena_admin_proto_input reserved3[ENA_ADMIN_RSS_PROTO_NUM]; }; /* ENA RSS flow hash input */ @@ -966,10 +905,10 @@ enum ena_admin_os_type { ENA_ADMIN_OS_DPDK = 3, /* FreeBSD OS */ - ENA_ADMIN_OS_FREE_BSD = 4, + ENA_ADMIN_OS_FREEBSD = 4, /* PXE OS */ - ENA_ADMIN_OS_PXE = 5, + ENA_ADMIN_OS_IPXE = 5, }; /* host info */ @@ -1284,9 +1223,6 @@ struct ena_admin_ena_mmio_req_read_less_resp { #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_FUNCTION_SELECTED_FUNC_MASK \ GENMASK(7, 0) -/* proto_input */ -#define ENA_ADMIN_PROTO_INPUT_INNER_MASK BIT(0) - /* feature_rss_flow_hash_input */ #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT 1 #define ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK BIT(1) @@ -1816,34 +1752,21 @@ set_ena_admin_feature_rss_flow_hash_function_selected_func( } static inline uint16_t -get_ena_admin_proto_input_inner(const struct ena_admin_proto_input *p) -{ - return p->flags & ENA_ADMIN_PROTO_INPUT_INNER_MASK; -} - -static inline void -set_ena_admin_proto_input_inner(struct ena_admin_proto_input *p, uint16_t val) -{ - p->flags |= val & ENA_ADMIN_PROTO_INPUT_INNER_MASK; -} - -static inline uint16_t get_ena_admin_feature_rss_flow_hash_input_L3_sort( const struct ena_admin_feature_rss_flow_hash_input *p) { return (p->supported_input_sort & - ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) + ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK) >> ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT; } static inline void set_ena_admin_feature_rss_flow_hash_input_L3_sort( - struct ena_admin_feature_rss_flow_hash_input *p, - uint16_t val) + struct ena_admin_feature_rss_flow_hash_input *p, uint16_t val) { p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_SHIFT) - & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK; + & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK; } static inline uint16_t @@ -1862,7 +1785,7 @@ set_ena_admin_feature_rss_flow_hash_input_L4_sort( { p->supported_input_sort |= (val << ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_SHIFT) - & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; + & ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK; } static inline uint16_t diff --git a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h index a547033d..6bc3d6a7 100644 --- a/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h +++ b/drivers/net/ena/base/ena_defs/ena_eth_io_defs.h @@ -87,28 +87,17 @@ struct ena_eth_io_tx_desc { /* word 1 : */ /* ethernet control - * 3:0 : l3_proto_idx - L3 protocol, if - * tunnel_ctrl[0] is set, then this is the inner - * packet L3. 
This field required when - * l3_csum_en,l3_csum or tso_en are set. + * 3:0 : l3_proto_idx - L3 protocol. This field + * required when l3_csum_en,l3_csum or tso_en are set. * 4 : DF - IPv4 DF, must be 0 if packet is IPv4 and * DF flags of the IPv4 header is 0. Otherwise must * be set to 1 * 6:5 : reserved5 - * 7 : tso_en - Enable TSO, For TCP only. For packets - * with tunnel (tunnel_ctrl[0]=1), then the inner - * packet will be segmented while the outer tunnel is - * duplicated - * 12:8 : l4_proto_idx - L4 protocol, if - * tunnel_ctrl[0] is set, then this is the inner - * packet L4. This field need to be set when - * l4_csum_en or tso_en are set. - * 13 : l3_csum_en - enable IPv4 header checksum. if - * tunnel_ctrl[0] is set, then this will enable - * checksum for the inner packet IPv4 - * 14 : l4_csum_en - enable TCP/UDP checksum. if - * tunnel_ctrl[0] is set, then this will enable - * checksum on the inner packet TCP/UDP checksum + * 7 : tso_en - Enable TSO, For TCP only. + * 12:8 : l4_proto_idx - L4 protocol. This field need + * to be set when l4_csum_en or tso_en are set. + * 13 : l3_csum_en - enable IPv4 header checksum. + * 14 : l4_csum_en - enable TCP/UDP checksum. * 15 : ethernet_fcs_dis - when set, the controller * will not append the 802.3 Ethernet Frame Check * Sequence to the packet @@ -124,11 +113,8 @@ struct ena_eth_io_tx_desc { * must not include the tcp length field. L4 partial * checksum should be used for IPv6 packet that * contains Routing Headers. - * 20:18 : tunnel_ctrl - Bit 0: tunneling exists, Bit - * 1: tunnel packet actually uses UDP as L4, Bit 2: - * tunnel packet L3 protocol: 0: IPv4 1: IPv6 - * 21 : ts_req - Indicates that the packet is IEEE - * 1588v2 packet requiring the timestamp + * 20:18 : reserved18 - MBZ + * 21 : reserved21 - MBZ * 31:22 : req_id_lo - Request ID[9:0] */ uint32_t meta_ctrl; @@ -160,9 +146,7 @@ struct ena_eth_io_tx_meta_desc { /* word 0 : */ /* length, request id and control flags * 9:0 : req_id_lo - Request ID[9:0] - * 11:10 : outr_l3_off_hi - valid if - * tunnel_ctrl[0]=1. bits[4:3] of outer packet L3 - * offset + * 11:10 : reserved10 - MBZ * 12 : reserved12 - MBZ * 13 : reserved13 - MBZ * 14 : ext_valid - if set, offset fields in Word2 @@ -201,35 +185,19 @@ struct ena_eth_io_tx_meta_desc { /* word 2 : */ /* word 2 * 7:0 : l3_hdr_len - the header length L3 IP header. - * if tunnel_ctrl[0]=1, this is the IP header length - * of the inner packet. FIXME - check if includes IP - * options hdr_len * 15:8 : l3_hdr_off - the offset of the first byte * in the L3 header from the beginning of the to-be - * transmitted packet. if tunnel_ctrl[0]=1, this is - * the offset the L3 header of the inner packet + * transmitted packet. * 21:16 : l4_hdr_len_in_words - counts the L4 header * length in words. there is an explicit assumption * that L4 header appears right after L3 header and - * L4 offset is based on l3_hdr_off+l3_hdr_len FIXME - * - pls confirm + * L4 offset is based on l3_hdr_off+l3_hdr_len * 31:22 : mss_lo */ uint32_t word2; /* word 3 : */ - /* word 3 - * 23:0 : crypto_info - * 28:24 : outr_l3_hdr_len_words - valid if - * tunnel_ctrl[0]=1. Counts in words - * 31:29 : outr_l3_off_lo - valid if - * tunnel_ctrl[0]=1. bits[2:0] of outer packet L3 - * offset. Counts the offset of the tunnel IP header - * from beginning of the packet. 
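The word-by-word comments above describe packed bit-fields (for example l4_proto_idx in bits 12:8 of the ethernet control word), and the rest of this header encodes each such field with a SHIFT/MASK macro pair plus get/set inlines. The pattern, reduced to one self-contained example, looks roughly like this; GENMASK is written out locally and EXAMPLE_L4_PROTO_IDX is a made-up field name, not a macro from the ENA definitions.

#include <stdint.h>

/* Local equivalent of the Linux-style GENMASK helper used by these defs. */
#define GENMASK(h, l)	(((~0U) << (l)) & (~0U >> (31 - (h))))

/* Hypothetical field occupying bits 12:8 of a 32-bit control word. */
#define EXAMPLE_L4_PROTO_IDX_SHIFT	8
#define EXAMPLE_L4_PROTO_IDX_MASK	GENMASK(12, 8)

static inline uint32_t example_get_l4_proto_idx(uint32_t meta_ctrl)
{
	return (meta_ctrl & EXAMPLE_L4_PROTO_IDX_MASK) >>
		EXAMPLE_L4_PROTO_IDX_SHIFT;
}

static inline void example_set_l4_proto_idx(uint32_t *meta_ctrl, uint32_t val)
{
	/* OR-in only: the set helpers never clear bits first, so the
	 * descriptor word is assumed to start out zeroed.
	 */
	*meta_ctrl |= (val << EXAMPLE_L4_PROTO_IDX_SHIFT) &
		EXAMPLE_L4_PROTO_IDX_MASK;
}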
NOTE: if the tunnel - * header requires CRC or checksum, it is expected to - * be done by the driver as it is not done by the HW - */ - uint32_t word3; + uint32_t reserved; }; /* ENA IO Queue Tx completions descriptor */ @@ -298,36 +266,26 @@ struct ena_eth_io_rx_cdesc_base { /* word 0 : */ /* 4:0 : l3_proto_idx - L3 protocol index * 6:5 : src_vlan_cnt - Source VLAN count - * 7 : tunnel - Tunnel exists + * 7 : reserved7 - MBZ * 12:8 : l4_proto_idx - L4 protocol index * 13 : l3_csum_err - when set, either the L3 * checksum error detected, or, the controller didn't - * validate the checksum, If tunnel exists, this - * result is for the inner packet. This bit is valid - * only when l3_proto_idx indicates IPv4 packet + * validate the checksum. This bit is valid only when + * l3_proto_idx indicates IPv4 packet * 14 : l4_csum_err - when set, either the L4 * checksum error detected, or, the controller didn't - * validate the checksum. If tunnel exists, this - * result is for the inner packet. This bit is valid - * only when l4_proto_idx indicates TCP/UDP packet, - * and, ipv4_frag is not set + * validate the checksum. This bit is valid only when + * l4_proto_idx indicates TCP/UDP packet, and, + * ipv4_frag is not set * 15 : ipv4_frag - Indicates IPv4 fragmented packet - * 17:16 : reserved16 - * 19:18 : reserved18 - * 20 : secured_pkt - Set if packet was handled by - * inline crypto engine - * 22:21 : crypto_status - bit 0 secured direction: - * 0: decryption, 1: encryption. bit 1 reserved - * 23 : reserved23 + * 23:16 : reserved16 * 24 : phase * 25 : l3_csum2 - second checksum engine result * 26 : first - Indicates first descriptor in * transaction * 27 : last - Indicates last descriptor in * transaction - * 28 : inr_l4_csum - TCP/UDP checksum results for - * inner packet - * 29 : reserved29 + * 29:28 : reserved28 * 30 : buffer - 0: Metadata descriptor. 
1: Buffer * Descriptor was used * 31 : reserved31 @@ -381,6 +339,16 @@ struct ena_eth_io_intr_reg { uint32_t intr_control; }; +/* ENA NUMA Node configuration register */ +struct ena_eth_io_numa_node_cfg_reg { + /* word 0 : */ + /* 7:0 : numa + * 30:8 : reserved + * 31 : enabled + */ + uint32_t numa_cfg; +}; + /* tx_desc */ #define ENA_ETH_IO_TX_DESC_LENGTH_MASK GENMASK(15, 0) #define ENA_ETH_IO_TX_DESC_REQ_ID_HI_SHIFT 16 @@ -410,10 +378,6 @@ struct ena_eth_io_intr_reg { #define ENA_ETH_IO_TX_DESC_ETHERNET_FCS_DIS_MASK BIT(15) #define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_SHIFT 17 #define ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK BIT(17) -#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT 18 -#define ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK GENMASK(20, 18) -#define ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT 21 -#define ENA_ETH_IO_TX_DESC_TS_REQ_MASK BIT(21) #define ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT 22 #define ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK GENMASK(31, 22) #define ENA_ETH_IO_TX_DESC_ADDR_HI_MASK GENMASK(15, 0) @@ -422,8 +386,6 @@ struct ena_eth_io_intr_reg { /* tx_meta_desc */ #define ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK GENMASK(9, 0) -#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT 10 -#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK GENMASK(11, 10) #define ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT 14 #define ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK BIT(14) #define ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT 15 @@ -452,11 +414,6 @@ struct ena_eth_io_intr_reg { #define ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK GENMASK(21, 16) #define ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT 22 #define ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK GENMASK(31, 22) -#define ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK GENMASK(23, 0) -#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT 24 -#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK GENMASK(28, 24) -#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT 29 -#define ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK GENMASK(31, 29) /* tx_cdesc */ #define ENA_ETH_IO_TX_CDESC_PHASE_MASK BIT(0) @@ -474,8 +431,6 @@ struct ena_eth_io_intr_reg { #define ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK GENMASK(4, 0) #define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_SHIFT 5 #define ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK GENMASK(6, 5) -#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT 7 -#define ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK BIT(7) #define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT 8 #define ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK GENMASK(12, 8) #define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT 13 @@ -484,10 +439,6 @@ struct ena_eth_io_intr_reg { #define ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK BIT(14) #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT 15 #define ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK BIT(15) -#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT 20 -#define ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK BIT(20) -#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT 21 -#define ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK GENMASK(22, 21) #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT 24 #define ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK BIT(24) #define ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT 25 @@ -496,8 +447,6 @@ struct ena_eth_io_intr_reg { #define ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK BIT(26) #define ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT 27 #define ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK BIT(27) -#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT 28 -#define ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK BIT(28) #define ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT 30 #define 
ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK BIT(30) @@ -508,6 +457,11 @@ struct ena_eth_io_intr_reg { #define ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT 30 #define ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK BIT(30) +/* numa_node_cfg_reg */ +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK GENMASK(7, 0) +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT 31 +#define ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK BIT(31) + #if !defined(ENA_DEFS_LINUX_MAINLINE) static inline uint32_t get_ena_eth_io_tx_desc_length( const struct ena_eth_io_tx_desc *p) @@ -743,38 +697,6 @@ static inline void set_ena_eth_io_tx_desc_l4_csum_partial( & ENA_ETH_IO_TX_DESC_L4_CSUM_PARTIAL_MASK; } -static inline uint32_t get_ena_eth_io_tx_desc_tunnel_ctrl( - const struct ena_eth_io_tx_desc *p) -{ - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK) - >> ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT; -} - -static inline void set_ena_eth_io_tx_desc_tunnel_ctrl( - struct ena_eth_io_tx_desc *p, - uint32_t val) -{ - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_SHIFT) - & ENA_ETH_IO_TX_DESC_TUNNEL_CTRL_MASK; -} - -static inline uint32_t get_ena_eth_io_tx_desc_ts_req( - const struct ena_eth_io_tx_desc *p) -{ - return (p->meta_ctrl & ENA_ETH_IO_TX_DESC_TS_REQ_MASK) - >> ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT; -} - -static inline void set_ena_eth_io_tx_desc_ts_req( - struct ena_eth_io_tx_desc *p, - uint32_t val) -{ - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_TS_REQ_SHIFT) - & ENA_ETH_IO_TX_DESC_TS_REQ_MASK; -} - static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo( const struct ena_eth_io_tx_desc *p) { @@ -783,11 +705,9 @@ static inline uint32_t get_ena_eth_io_tx_desc_req_id_lo( } static inline void set_ena_eth_io_tx_desc_req_id_lo( - struct ena_eth_io_tx_desc *p, - uint32_t val) + struct ena_eth_io_tx_desc *p, uint32_t val) { - p->meta_ctrl |= - (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) + p->meta_ctrl |= (val << ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) & ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK; } @@ -833,22 +753,6 @@ static inline void set_ena_eth_io_tx_meta_desc_req_id_lo( p->len_ctrl |= val & ENA_ETH_IO_TX_META_DESC_REQ_ID_LO_MASK; } -static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_hi( - const struct ena_eth_io_tx_meta_desc *p) -{ - return (p->len_ctrl & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK) - >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT; -} - -static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_hi( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) -{ - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_SHIFT) - & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_HI_MASK; -} - static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid( const struct ena_eth_io_tx_meta_desc *p) { @@ -857,11 +761,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_ext_valid( } static inline void set_ena_eth_io_tx_meta_desc_ext_valid( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_EXT_VALID_SHIFT) & ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK; } @@ -873,11 +775,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_word3_valid( } static inline void set_ena_eth_io_tx_meta_desc_word3_valid( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_WORD3_VALID_SHIFT) & 
ENA_ETH_IO_TX_META_DESC_WORD3_VALID_MASK; } @@ -889,11 +789,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_mss_hi_ptp( } static inline void set_ena_eth_io_tx_meta_desc_mss_hi_ptp( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_SHIFT) & ENA_ETH_IO_TX_META_DESC_MSS_HI_PTP_MASK; } @@ -905,11 +803,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_eth_meta_type( } static inline void set_ena_eth_io_tx_meta_desc_eth_meta_type( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_SHIFT) & ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK; } @@ -921,11 +817,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_store( } static inline void set_ena_eth_io_tx_meta_desc_meta_store( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_STORE_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_STORE_MASK; } @@ -937,11 +831,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_meta_desc( } static inline void set_ena_eth_io_tx_meta_desc_meta_desc( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_META_DESC_SHIFT) & ENA_ETH_IO_TX_META_DESC_META_DESC_MASK; } @@ -953,11 +845,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_phase( } static inline void set_ena_eth_io_tx_meta_desc_phase( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) & ENA_ETH_IO_TX_META_DESC_PHASE_MASK; } @@ -969,11 +859,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_first( } static inline void set_ena_eth_io_tx_meta_desc_first( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_FIRST_SHIFT) & ENA_ETH_IO_TX_META_DESC_FIRST_MASK; } @@ -985,11 +873,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_last( } static inline void set_ena_eth_io_tx_meta_desc_last( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_LAST_SHIFT) & ENA_ETH_IO_TX_META_DESC_LAST_MASK; } @@ -1001,11 +887,9 @@ static inline uint32_t get_ena_eth_io_tx_meta_desc_comp_req( } static inline void set_ena_eth_io_tx_meta_desc_comp_req( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) + struct ena_eth_io_tx_meta_desc *p, uint32_t val) { - p->len_ctrl |= - (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) + p->len_ctrl |= (val << ENA_ETH_IO_TX_META_DESC_COMP_REQ_SHIFT) & ENA_ETH_IO_TX_META_DESC_COMP_REQ_MASK; } @@ -1083,51 +967,6 @@ static inline void set_ena_eth_io_tx_meta_desc_mss_lo( & ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK; } -static inline 
uint32_t get_ena_eth_io_tx_meta_desc_crypto_info( - const struct ena_eth_io_tx_meta_desc *p) -{ - return p->word3 & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK; -} - -static inline void set_ena_eth_io_tx_meta_desc_crypto_info( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) -{ - p->word3 |= val & ENA_ETH_IO_TX_META_DESC_CRYPTO_INFO_MASK; -} - -static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words( - const struct ena_eth_io_tx_meta_desc *p) -{ - return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK) - >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT; -} - -static inline void set_ena_eth_io_tx_meta_desc_outr_l3_hdr_len_words( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) -{ - p->word3 |= - (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_SHIFT) - & ENA_ETH_IO_TX_META_DESC_OUTR_L3_HDR_LEN_WORDS_MASK; -} - -static inline uint32_t get_ena_eth_io_tx_meta_desc_outr_l3_off_lo( - const struct ena_eth_io_tx_meta_desc *p) -{ - return (p->word3 & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK) - >> ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT; -} - -static inline void set_ena_eth_io_tx_meta_desc_outr_l3_off_lo( - struct ena_eth_io_tx_meta_desc *p, - uint32_t val) -{ - p->word3 |= - (val << ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_SHIFT) - & ENA_ETH_IO_TX_META_DESC_OUTR_L3_OFF_LO_MASK; -} - static inline uint8_t get_ena_eth_io_tx_cdesc_phase( const struct ena_eth_io_tx_cdesc *p) { @@ -1231,22 +1070,6 @@ static inline void set_ena_eth_io_rx_cdesc_base_src_vlan_cnt( & ENA_ETH_IO_RX_CDESC_BASE_SRC_VLAN_CNT_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_tunnel( - const struct ena_eth_io_rx_cdesc_base *p) -{ - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT; -} - -static inline void set_ena_eth_io_rx_cdesc_base_tunnel( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) -{ - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_TUNNEL_MASK; -} - static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx( const struct ena_eth_io_rx_cdesc_base *p) { @@ -1255,11 +1078,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_proto_idx( } static inline void set_ena_eth_io_rx_cdesc_base_l4_proto_idx( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK; } @@ -1271,11 +1092,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum_err( } static inline void set_ena_eth_io_rx_cdesc_base_l3_csum_err( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK; } @@ -1287,11 +1106,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l4_csum_err( } static inline void set_ena_eth_io_rx_cdesc_base_l4_csum_err( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK; } @@ -1303,46 +1120,12 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_ipv4_frag( } static 
inline void set_ena_eth_io_rx_cdesc_base_ipv4_frag( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_secured_pkt( - const struct ena_eth_io_rx_cdesc_base *p) -{ - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT; -} - -static inline void set_ena_eth_io_rx_cdesc_base_secured_pkt( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) -{ - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_SECURED_PKT_MASK; -} - -static inline uint32_t get_ena_eth_io_rx_cdesc_base_crypto_status( - const struct ena_eth_io_rx_cdesc_base *p) -{ - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT; -} - -static inline void set_ena_eth_io_rx_cdesc_base_crypto_status( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) -{ - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_CRYPTO_STATUS_MASK; -} - static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase( const struct ena_eth_io_rx_cdesc_base *p) { @@ -1351,11 +1134,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_phase( } static inline void set_ena_eth_io_rx_cdesc_base_phase( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK; } @@ -1367,11 +1148,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_l3_csum2( } static inline void set_ena_eth_io_rx_cdesc_base_l3_csum2( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM2_MASK; } @@ -1383,11 +1162,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_first( } static inline void set_ena_eth_io_rx_cdesc_base_first( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK; } @@ -1399,30 +1176,12 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_last( } static inline void set_ena_eth_io_rx_cdesc_base_last( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK; } -static inline uint32_t get_ena_eth_io_rx_cdesc_base_inr_l4_csum( - const struct ena_eth_io_rx_cdesc_base *p) -{ - return (p->status & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK) - >> ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT; -} - -static inline void set_ena_eth_io_rx_cdesc_base_inr_l4_csum( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) -{ - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_SHIFT) - & ENA_ETH_IO_RX_CDESC_BASE_INR_L4_CSUM_MASK; -} - static inline uint32_t 
get_ena_eth_io_rx_cdesc_base_buffer( const struct ena_eth_io_rx_cdesc_base *p) { @@ -1431,11 +1190,9 @@ static inline uint32_t get_ena_eth_io_rx_cdesc_base_buffer( } static inline void set_ena_eth_io_rx_cdesc_base_buffer( - struct ena_eth_io_rx_cdesc_base *p, - uint32_t val) + struct ena_eth_io_rx_cdesc_base *p, uint32_t val) { - p->status |= - (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) + p->status |= (val << ENA_ETH_IO_RX_CDESC_BASE_BUFFER_SHIFT) & ENA_ETH_IO_RX_CDESC_BASE_BUFFER_MASK; } @@ -1446,8 +1203,7 @@ static inline uint32_t get_ena_eth_io_intr_reg_rx_intr_delay( } static inline void set_ena_eth_io_intr_reg_rx_intr_delay( - struct ena_eth_io_intr_reg *p, - uint32_t val) + struct ena_eth_io_intr_reg *p, uint32_t val) { p->intr_control |= val & ENA_ETH_IO_INTR_REG_RX_INTR_DELAY_MASK; } @@ -1460,11 +1216,9 @@ static inline uint32_t get_ena_eth_io_intr_reg_tx_intr_delay( } static inline void set_ena_eth_io_intr_reg_tx_intr_delay( - struct ena_eth_io_intr_reg *p, - uint32_t val) + struct ena_eth_io_intr_reg *p, uint32_t val) { - p->intr_control |= - (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) + p->intr_control |= (val << ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_SHIFT) & ENA_ETH_IO_INTR_REG_TX_INTR_DELAY_MASK; } @@ -1476,13 +1230,37 @@ static inline uint32_t get_ena_eth_io_intr_reg_intr_unmask( } static inline void set_ena_eth_io_intr_reg_intr_unmask( - struct ena_eth_io_intr_reg *p, - uint32_t val) + struct ena_eth_io_intr_reg *p, uint32_t val) { - p->intr_control |= - (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) + p->intr_control |= (val << ENA_ETH_IO_INTR_REG_INTR_UNMASK_SHIFT) & ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK; } +static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_numa( + const struct ena_eth_io_numa_node_cfg_reg *p) +{ + return p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK; +} + +static inline void set_ena_eth_io_numa_node_cfg_reg_numa( + struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) +{ + p->numa_cfg |= val & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK; +} + +static inline uint32_t get_ena_eth_io_numa_node_cfg_reg_enabled( + const struct ena_eth_io_numa_node_cfg_reg *p) +{ + return (p->numa_cfg & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK) + >> ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT; +} + +static inline void set_ena_eth_io_numa_node_cfg_reg_enabled( + struct ena_eth_io_numa_node_cfg_reg *p, uint32_t val) +{ + p->numa_cfg |= (val << ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_SHIFT) + & ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; +} + #endif /* !defined(ENA_DEFS_LINUX_MAINLINE) */ #endif /*_ENA_ETH_IO_H_ */ diff --git a/drivers/net/ena/base/ena_defs/ena_gen_info.h b/drivers/net/ena/base/ena_defs/ena_gen_info.h index 4abdffed..3d252096 100644 --- a/drivers/net/ena/base/ena_defs/ena_gen_info.h +++ b/drivers/net/ena/base/ena_defs/ena_gen_info.h @@ -31,5 +31,5 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#define ENA_GEN_DATE "Mon Feb 15 14:33:08 IST 2016" -#define ENA_GEN_COMMIT "c71ec25" +#define ENA_GEN_DATE "Sun Jun 5 10:24:39 IDT 2016" +#define ENA_GEN_COMMIT "17146ed" diff --git a/drivers/net/ena/base/ena_eth_com.c b/drivers/net/ena/base/ena_eth_com.c index 459e0bbb..290a5666 100644 --- a/drivers/net/ena/base/ena_eth_com.c +++ b/drivers/net/ena/base/ena_eth_com.c @@ -62,7 +62,7 @@ static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq) /* Switch phase bit in case of wrap around */ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0)) - io_cq->phase = 1 - io_cq->phase; + io_cq->phase ^= 1; } static inline void *get_sq_desc(struct ena_com_io_sq *io_sq) @@ -97,7 +97,7 @@ static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq) /* Switch phase bit in case of wrap around */ if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0)) - io_sq->phase = 1 - io_sq->phase; + io_sq->phase ^= 1; } static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, @@ -110,7 +110,10 @@ static inline int ena_com_write_header(struct ena_com_io_sq *io_sq, if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) return 0; - ENA_ASSERT(io_sq->header_addr, "header address is NULL\n"); + if (unlikely(!io_sq->header_addr)) { + ena_trc_err("Push buffer header ptr is NULL\n"); + return ENA_COM_INVAL; + } memcpy_toio(dev_head_addr, head_src, header_len); @@ -127,8 +130,7 @@ static inline struct ena_eth_io_rx_cdesc_base * } static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, - u16 *first_cdesc_idx, - u16 *nb_hw_desc) + u16 *first_cdesc_idx) { struct ena_eth_io_rx_cdesc_base *cdesc; u16 count = 0, head_masked; @@ -161,8 +163,7 @@ static inline int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq, count = 0; } - *nb_hw_desc = count; - return 0; + return count; } static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq, @@ -408,21 +409,20 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq, u16 cdesc_idx = 0; u16 nb_hw_desc; u16 i; - int rc; ENA_ASSERT(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type"); - rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc); - if (rc || (nb_hw_desc == 0)) { + nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx); + if (nb_hw_desc == 0) { ena_rx_ctx->descs = nb_hw_desc; - return rc; + return 0; } ena_trc_dbg("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc); - if (unlikely(nb_hw_desc >= ena_rx_ctx->max_bufs)) { + if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) { ena_trc_err("Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs); return ENA_COM_NO_SPACE; @@ -459,7 +459,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq, "wrong Q type"); if (unlikely(ena_com_sq_empty_space(io_sq) == 0)) - return -1; + return ENA_COM_NO_SPACE; desc = get_sq_desc(io_sq); memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc)); @@ -496,9 +496,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) ((unsigned char *)io_cq->cdesc_addr.virt_addr + (masked_head * io_cq->cdesc_entry_size_in_bytes)); + /* When the current completion descriptor phase isn't the same as the + * expected, it mean that the device still didn't update + * this completion. 
+ */ cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK; if (cdesc_phase != expected_phase) - return -1; + return ENA_COM_TRY_AGAIN; ena_com_cq_inc_head(io_cq); diff --git a/drivers/net/ena/base/ena_eth_com.h b/drivers/net/ena/base/ena_eth_com.h index 325d69c0..71a880c0 100644 --- a/drivers/net/ena/base/ena_eth_com.h +++ b/drivers/net/ena/base/ena_eth_com.h @@ -142,6 +142,20 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq) return 0; } +static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq, + u8 numa_node) +{ + struct ena_eth_io_numa_node_cfg_reg numa_cfg; + + if (!io_cq->numa_node_cfg_reg) + return; + + numa_cfg.numa_cfg = (numa_node & ENA_ETH_IO_NUMA_NODE_CFG_REG_NUMA_MASK) + | ENA_ETH_IO_NUMA_NODE_CFG_REG_ENABLED_MASK; + + ENA_REG_WRITE32(numa_cfg.numa_cfg, io_cq->numa_node_cfg_reg); +} + static inline void ena_com_comp_ack(struct ena_com_io_sq *io_sq, u16 elem) { io_sq->next_to_comp += elem; diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h index 5f693301..87c3bf13 100644 --- a/drivers/net/ena/base/ena_plat_dpdk.h +++ b/drivers/net/ena/base/ena_plat_dpdk.h @@ -62,10 +62,10 @@ typedef uint64_t dma_addr_t; #endif #define ena_atomic32_t rte_atomic32_t -#define ena_mem_handle_t void * +#define ena_mem_handle_t const struct rte_memzone * -#define SZ_256 (256) -#define SZ_4K (4096) +#define SZ_256 (256U) +#define SZ_4K (4096U) #define ENA_COM_OK 0 #define ENA_COM_NO_MEM -ENOMEM @@ -75,6 +75,7 @@ typedef uint64_t dma_addr_t; #define ENA_COM_PERMISSION -EPERM #define ENA_COM_TIMER_EXPIRED -ETIME #define ENA_COM_FAULT -EFAULT +#define ENA_COM_TRY_AGAIN -EAGAIN #define ____cacheline_aligned __rte_cache_aligned @@ -83,6 +84,7 @@ typedef uint64_t dma_addr_t; #define ENA_MSLEEP(x) rte_delay_ms(x) #define ENA_UDELAY(x) rte_delay_us(x) +#define ENA_TOUCH(x) ((void)(x)) #define memcpy_toio memcpy #define wmb rte_wmb #define rmb rte_wmb @@ -182,17 +184,45 @@ typedef uint64_t dma_addr_t; do { \ const struct rte_memzone *mz; \ char z_name[RTE_MEMZONE_NAMESIZE]; \ - (void)dmadev; (void)handle; \ + ENA_TOUCH(dmadev); ENA_TOUCH(handle); \ snprintf(z_name, sizeof(z_name), \ "ena_alloc_%d", ena_alloc_cnt++); \ mz = rte_memzone_reserve(z_name, size, SOCKET_ID_ANY, 0); \ + memset(mz->addr, 0, size); \ virt = mz->addr; \ phys = mz->phys_addr; \ + handle = mz; \ } while (0) #define ENA_MEM_FREE_COHERENT(dmadev, size, virt, phys, handle) \ - ({(void)size; rte_free(virt); }) + ({ ENA_TOUCH(size); ENA_TOUCH(phys); \ + ENA_TOUCH(dmadev); \ + rte_memzone_free(handle); }) + +#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, node, dev_node) \ + do { \ + const struct rte_memzone *mz; \ + char z_name[RTE_MEMZONE_NAMESIZE]; \ + ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ + snprintf(z_name, sizeof(z_name), \ + "ena_alloc_%d", ena_alloc_cnt++); \ + mz = rte_memzone_reserve(z_name, size, node, 0); \ + virt = mz->addr; \ + phys = mz->phys_addr; \ + } while (0) + +#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) \ + do { \ + const struct rte_memzone *mz; \ + char z_name[RTE_MEMZONE_NAMESIZE]; \ + ENA_TOUCH(dmadev); ENA_TOUCH(dev_node); \ + snprintf(z_name, sizeof(z_name), \ + "ena_alloc_%d", ena_alloc_cnt++); \ + mz = rte_memzone_reserve(z_name, size, node, 0); \ + virt = mz->addr; \ + } while (0) + #define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1) -#define ENA_MEM_FREE(dmadev, ptr) ({(void)dmadev; rte_free(ptr); }) +#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); }) static 
inline void writel(u32 value, volatile void *addr) { diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index e157587b..ac0803d6 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -37,6 +37,8 @@ #include <rte_atomic.h> #include <rte_dev.h> #include <rte_errno.h> +#include <rte_version.h> +#include <rte_eal_memconfig.h> #include "ena_ethdev.h" #include "ena_logs.h" @@ -49,6 +51,10 @@ #include <ena_admin_defs.h> #include <ena_eth_io_defs.h> +#define DRV_MODULE_VER_MAJOR 1 +#define DRV_MODULE_VER_MINOR 0 +#define DRV_MODULE_VER_SUBMINOR 0 + #define ENA_IO_TXQ_IDX(q) (2 * (q)) #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1) /*reverse version of ENA_IO_RXQ_IDX*/ @@ -72,6 +78,89 @@ #define ENA_RX_RSS_TABLE_LOG_SIZE 7 #define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE) #define ENA_HASH_KEY_SIZE 40 +#define ENA_ETH_SS_STATS 0xFF +#define ETH_GSTRING_LEN 32 + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; + +struct ena_stats { + char name[ETH_GSTRING_LEN]; + int stat_offset; +}; + +#define ENA_STAT_ENA_COM_ENTRY(stat) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_com_stats_admin, stat) \ +} + +#define ENA_STAT_ENTRY(stat, stat_type) { \ + .name = #stat, \ + .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \ +} + +#define ENA_STAT_RX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, rx) + +#define ENA_STAT_TX_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, tx) + +#define ENA_STAT_GLOBAL_ENTRY(stat) \ + ENA_STAT_ENTRY(stat, dev) + +static const struct ena_stats ena_stats_global_strings[] = { + ENA_STAT_GLOBAL_ENTRY(tx_timeout), + ENA_STAT_GLOBAL_ENTRY(io_suspend), + ENA_STAT_GLOBAL_ENTRY(io_resume), + ENA_STAT_GLOBAL_ENTRY(wd_expired), + ENA_STAT_GLOBAL_ENTRY(interface_up), + ENA_STAT_GLOBAL_ENTRY(interface_down), + ENA_STAT_GLOBAL_ENTRY(admin_q_pause), +}; + +static const struct ena_stats ena_stats_tx_strings[] = { + ENA_STAT_TX_ENTRY(cnt), + ENA_STAT_TX_ENTRY(bytes), + ENA_STAT_TX_ENTRY(queue_stop), + ENA_STAT_TX_ENTRY(queue_wakeup), + ENA_STAT_TX_ENTRY(dma_mapping_err), + ENA_STAT_TX_ENTRY(linearize), + ENA_STAT_TX_ENTRY(linearize_failed), + ENA_STAT_TX_ENTRY(tx_poll), + ENA_STAT_TX_ENTRY(doorbells), + ENA_STAT_TX_ENTRY(prepare_ctx_err), + ENA_STAT_TX_ENTRY(missing_tx_comp), + ENA_STAT_TX_ENTRY(bad_req_id), +}; + +static const struct ena_stats ena_stats_rx_strings[] = { + ENA_STAT_RX_ENTRY(cnt), + ENA_STAT_RX_ENTRY(bytes), + ENA_STAT_RX_ENTRY(refil_partial), + ENA_STAT_RX_ENTRY(bad_csum), + ENA_STAT_RX_ENTRY(page_alloc_fail), + ENA_STAT_RX_ENTRY(skb_alloc_fail), + ENA_STAT_RX_ENTRY(dma_mapping_err), + ENA_STAT_RX_ENTRY(bad_desc_num), + ENA_STAT_RX_ENTRY(small_copy_len_pkt), +}; + +static const struct ena_stats ena_stats_ena_com_strings[] = { + ENA_STAT_ENA_COM_ENTRY(aborted_cmd), + ENA_STAT_ENA_COM_ENTRY(submitted_cmd), + ENA_STAT_ENA_COM_ENTRY(completed_cmd), + ENA_STAT_ENA_COM_ENTRY(out_of_space), + ENA_STAT_ENA_COM_ENTRY(no_completion), +}; + +#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings) +#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings) +#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings) +#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings) /** Vendor ID used by Amazon devices */ #define PCI_VENDOR_ID_AMAZON 0x1D0F @@ -80,11 +169,9 @@ #define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21 static struct rte_pci_id pci_id_ena_map[] = { -#define RTE_PCI_DEV_ID_DECL_ENA(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, - - 
RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) - RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) - {.device_id = 0}, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) }, + { .device_id = 0 }, }; static int ena_device_init(struct ena_com_dev *ena_dev, @@ -127,6 +214,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev, static int ena_rss_reta_query(struct rte_eth_dev *dev, struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size); +static int ena_get_sset_count(struct rte_eth_dev *dev, int sset); static struct eth_dev_ops ena_dev_ops = { .dev_configure = ena_dev_configure, @@ -144,6 +232,18 @@ static struct eth_dev_ops ena_dev_ops = { .reta_query = ena_rss_reta_query, }; +#define NUMA_NO_NODE SOCKET_ID_ANY + +static inline int ena_cpu_to_node(int cpu) +{ + struct rte_config *config = rte_eal_get_configuration(); + + if (likely(cpu < RTE_MAX_MEMZONE)) + return config->mem_config->memzone[cpu].socket_id; + + return NUMA_NO_NODE; +} + static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf, struct ena_com_rx_ctx *ena_rx_ctx) { @@ -226,6 +326,103 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf, } } +static void ena_config_host_info(struct ena_com_dev *ena_dev) +{ + struct ena_admin_host_info *host_info; + int rc; + + /* Allocate only the host info */ + rc = ena_com_allocate_host_info(ena_dev); + if (rc) { + RTE_LOG(ERR, PMD, "Cannot allocate host info\n"); + return; + } + + host_info = ena_dev->host_attr.host_info; + + host_info->os_type = ENA_ADMIN_OS_DPDK; + host_info->kernel_ver = RTE_VERSION; + strncpy((char *)host_info->kernel_ver_str, rte_version(), + strlen(rte_version())); + host_info->os_dist = RTE_VERSION; + strncpy((char *)host_info->os_dist_str, rte_version(), + strlen(rte_version())); + host_info->driver_version = + (DRV_MODULE_VER_MAJOR) | + (DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) | + (DRV_MODULE_VER_SUBMINOR << + ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT); + + rc = ena_com_set_host_attributes(ena_dev); + if (rc) { + if (rc == -EPERM) + RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); + else + RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); + + goto err; + } + + return; + +err: + ena_com_delete_host_info(ena_dev); +} + +static int +ena_get_sset_count(struct rte_eth_dev *dev, int sset) +{ + if (sset != ETH_SS_STATS) + return -EOPNOTSUPP; + + /* Workaround for clang: + * touch internal structures to prevent + * compiler error + */ + ENA_TOUCH(ena_stats_global_strings); + ENA_TOUCH(ena_stats_tx_strings); + ENA_TOUCH(ena_stats_rx_strings); + ENA_TOUCH(ena_stats_ena_com_strings); + + return dev->data->nb_tx_queues * + (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) + + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM; +} + +static void ena_config_debug_area(struct ena_adapter *adapter) +{ + u32 debug_area_size; + int rc, ss_count; + + ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS); + if (ss_count <= 0) { + RTE_LOG(ERR, PMD, "SS count is negative\n"); + return; + } + + /* allocate 32 bytes for each string and 64bit for the value */ + debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count; + + rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size); + if (rc) { + RTE_LOG(ERR, PMD, "Cannot allocate debug area\n"); + return; + } + + rc = ena_com_set_host_attributes(&adapter->ena_dev); + if (rc) { + if (rc == -EPERM) + RTE_LOG(WARNING, PMD, "Cannot set host attributes\n"); + 
else + RTE_LOG(ERR, PMD, "Cannot set host attributes\n"); + goto err; + } + + return; +err: + ena_com_delete_debug_area(&adapter->ena_dev); +} + static void ena_close(struct rte_eth_dev *dev) { struct ena_adapter *adapter = @@ -742,6 +939,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, __rte_unused unsigned int socket_id, __rte_unused const struct rte_eth_txconf *tx_conf) { + struct ena_com_create_io_ctx ctx = + /* policy set to _HOST just to satisfy icc compiler */ + { ENA_ADMIN_PLACEMENT_POLICY_HOST, + ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 }; struct ena_ring *txq = NULL; struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); @@ -767,11 +968,15 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, } ena_qid = ENA_IO_TXQ_IDX(queue_idx); - rc = ena_com_create_io_queue(ena_dev, ena_qid, - ENA_COM_IO_QUEUE_DIRECTION_TX, - ena_dev->tx_mem_queue_type, - -1 /* admin interrupts is not used */, - nb_desc); + + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX; + ctx.qid = ena_qid; + ctx.msix_vector = -1; /* admin interrupts not used */ + ctx.mem_queue_type = ena_dev->tx_mem_queue_type; + ctx.queue_size = adapter->tx_ring_size; + ctx.numa_node = ena_cpu_to_node(queue_idx); + + rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) { RTE_LOG(ERR, PMD, "failed to create io TX queue #%d (qid:%d) rc: %d\n", @@ -780,6 +985,17 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &txq->ena_com_io_sq, + &txq->ena_com_io_cq); + if (rc) { + RTE_LOG(ERR, PMD, + "Failed to get TX queue handlers. TX queue num %d rc: %d\n", + queue_idx, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + goto err; + } + txq->port_id = dev->data->port_id; txq->next_to_clean = 0; txq->next_to_use = 0; @@ -808,7 +1024,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, /* Store pointer to this queue in upper layer */ txq->configured = 1; dev->data->tx_queues[queue_idx] = txq; - +err: return rc; } @@ -819,6 +1035,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, __rte_unused const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) { + struct ena_com_create_io_ctx ctx = + /* policy set to _HOST just to satisfy icc compiler */ + { ENA_ADMIN_PLACEMENT_POLICY_HOST, + ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 }; struct ena_adapter *adapter = (struct ena_adapter *)(dev->data->dev_private); struct ena_ring *rxq = NULL; @@ -842,11 +1062,15 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, } ena_qid = ENA_IO_RXQ_IDX(queue_idx); - rc = ena_com_create_io_queue(ena_dev, ena_qid, - ENA_COM_IO_QUEUE_DIRECTION_RX, - ENA_ADMIN_PLACEMENT_POLICY_HOST, - -1 /* admin interrupts not used */, - nb_desc); + + ctx.qid = ena_qid; + ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX; + ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; + ctx.msix_vector = -1; /* admin interrupts not used */ + ctx.queue_size = adapter->rx_ring_size; + ctx.numa_node = ena_cpu_to_node(queue_idx); + + rc = ena_com_create_io_queue(ena_dev, &ctx); if (rc) RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", queue_idx, rc); @@ -854,6 +1078,16 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; + rc = ena_com_get_io_handlers(ena_dev, ena_qid, + &rxq->ena_com_io_sq, + &rxq->ena_com_io_cq); + if (rc) { + RTE_LOG(ERR, PMD, + "Failed to 
get RX queue handlers. RX queue num %d rc: %d\n", + queue_idx, rc); + ena_com_destroy_io_queue(ena_dev, ena_qid); + } + rxq->port_id = dev->data->port_id; rxq->next_to_clean = 0; rxq->next_to_use = 0; @@ -920,10 +1154,14 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size); } - rte_wmb(); - rxq->next_to_use = next_to_use; - /* let HW know that it can fill buffers with data */ - ena_com_write_sq_doorbell(rxq->ena_com_io_sq); + /* When we submitted free recources to device... */ + if (i > 0) { + /* ...let HW know that it can fill buffers with data */ + rte_wmb(); + ena_com_write_sq_doorbell(rxq->ena_com_io_sq); + + rxq->next_to_use = next_to_use; + } return i; } @@ -932,6 +1170,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct ena_com_dev_get_features_ctx *get_feat_ctx) { int rc; + bool readless_supported; /* Initialize mmio registers */ rc = ena_com_mmio_reg_read_request_init(ena_dev); @@ -940,6 +1179,14 @@ static int ena_device_init(struct ena_com_dev *ena_dev, return rc; } + /* The PCIe configuration space revision id indicate if mmio reg + * read is disabled. + */ + readless_supported = + !(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id + & ENA_MMIO_DISABLE_REG_READ); + ena_com_set_mmio_read_mode(ena_dev, readless_supported); + /* reset device */ rc = ena_com_dev_reset(ena_dev); if (rc) { @@ -964,6 +1211,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, goto err_mmio_read_less; } + ena_config_host_info(ena_dev); + /* To enable the msix interrupts the driver needs to know the number * of queues. So the driver uses polling mode to retrieve this * information. @@ -1077,6 +1326,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev) /* prepare ring structures */ ena_init_rings(adapter); + ena_config_debug_area(adapter); + /* Set max MTU for this device */ adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu; @@ -1316,7 +1567,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, struct ena_tx_buffer *tx_info; struct ena_com_buf *ebuf; uint16_t rc, req_id, total_tx_descs = 0; - int sent_idx = 0; + uint16_t sent_idx = 0; int nb_hw_desc; /* Check adapter state */ @@ -1395,9 +1646,14 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size); } - /* Let HW do it's best :-) */ - rte_wmb(); - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + /* If there are ready packets to be xmitted... 
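Both the RX refill path above and the TX path just below were reworked to ring the hardware doorbell only when at least one descriptor was actually posted, with a write barrier ordering the descriptor stores ahead of the doorbell write. The shape of that pattern, pulled out of the driver, is sketched here; struct ring, post_descriptor() and write_doorbell() are hypothetical stand-ins for the ena_com submission-queue helpers, and rte_wmb() is only declared so the fragment stands alone.

/* Hypothetical stand-ins: the batching of the doorbell is the only point. */
struct ring;
int post_descriptor(struct ring *r);	/* returns 0 on success */
void write_doorbell(struct ring *r);
void rte_wmb(void);			/* stand-in for DPDK's write barrier */

static unsigned int refill_and_notify(struct ring *r, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (post_descriptor(r) != 0)
			break;		/* out of buffers or queue space */
	}

	/* Skip the barrier and doorbell entirely when nothing was posted. */
	if (i > 0) {
		rte_wmb();		/* descriptors visible before doorbell */
		write_doorbell(r);
	}

	return i;
}

Skipping the doorbell on an empty batch avoids an uncached MMIO write on the hot path and, as in the hunks here, keeps next_to_use from advancing past work that was never submitted.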
*/ + if (sent_idx > 0) { + /* ...let HW do its best :-) */ + rte_wmb(); + ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + + tx_ring->next_to_use = next_to_use; + } /* Clear complete packets */ while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) { @@ -1420,9 +1676,11 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, break; } - /* acknowledge completion of sent packets */ - ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); - tx_ring->next_to_use = next_to_use; + if (total_tx_descs > 0) { + /* acknowledge completion of sent packets */ + ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs); + } + return sent_idx; } @@ -1445,9 +1703,9 @@ rte_ena_pmd_init(const char *name __rte_unused, }; struct rte_driver ena_pmd_drv = { - .name = "ena_driver", .type = PMD_PDEV, .init = rte_ena_pmd_init, }; -PMD_REGISTER_DRIVER(ena_pmd_drv); +PMD_REGISTER_DRIVER(ena_pmd_drv, ena); +DRIVER_REGISTER_PCI_TABLE(ena, pci_id_ena_map); diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h index aca853c1..61390a93 100644 --- a/drivers/net/ena/ena_ethdev.h +++ b/drivers/net/ena/ena_ethdev.h @@ -54,6 +54,8 @@ #define ENA_PKT_MAX_BUFS 17 +#define ENA_MMIO_DISABLE_REG_READ BIT(0) + #define ENA_CIRC_COUNT(head, tail, size) \ (((uint16_t)((uint16_t)(head) - (uint16_t)(tail))) & ((size) - 1)) @@ -124,6 +126,43 @@ struct ena_driver_stats { rte_atomic64_t rx_nombuf; }; +struct ena_stats_dev { + u64 tx_timeout; + u64 io_suspend; + u64 io_resume; + u64 wd_expired; + u64 interface_up; + u64 interface_down; + u64 admin_q_pause; +}; + +struct ena_stats_tx { + u64 cnt; + u64 bytes; + u64 queue_stop; + u64 prepare_ctx_err; + u64 queue_wakeup; + u64 dma_mapping_err; + u64 linearize; + u64 linearize_failed; + u64 tx_poll; + u64 doorbells; + u64 missing_tx_comp; + u64 bad_req_id; +}; + +struct ena_stats_rx { + u64 cnt; + u64 bytes; + u64 refil_partial; + u64 bad_csum; + u64 page_alloc_fail; + u64 skb_alloc_fail; + u64 dma_mapping_err; + u64 bad_desc_num; + u64 small_copy_len_pkt; +}; + /* board specific private data structure */ struct ena_adapter { /* OS defined structs */ diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c index 9b9ff4d7..7c4119c3 100644 --- a/drivers/net/enic/base/vnic_wq.c +++ b/drivers/net/enic/base/vnic_wq.c @@ -197,6 +197,8 @@ void vnic_wq_clean(struct vnic_wq *wq, wq->head_idx = 0; wq->tail_idx = 0; + wq->last_completed_index = 0; + *((uint32_t *)wq->cqmsg_rz->addr) = 0; iowrite32(0, &wq->ctrl->fetch_index); iowrite32(0, &wq->ctrl->posted_index); diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index 53fed0b8..a5e2e389 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -152,6 +152,12 @@ struct enic { /* software counters */ struct enic_soft_stats soft_stats; + /* configured resources on vic */ + unsigned int conf_rq_count; + unsigned int conf_wq_count; + unsigned int conf_cq_count; + unsigned int conf_intr_count; + /* linked list storing memory allocations */ LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list; rte_spinlock_t memzone_list_lock; @@ -221,18 +227,6 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx) return idx; } -#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG -#define ENIC_ASSERT(cond) \ - do { \ - if (unlikely(!(cond))) { \ - rte_panic("line %d\tassert \"" #cond "\"" \ - "failed\n", __LINE__); \ - } \ - } while (0) -#else -#define ENIC_ASSERT(cond) do {} while (0) -#endif - extern void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats 
*stats); extern int enic_fdir_add_fltr(struct enic *enic, diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c index a7ce064f..3c87b49e 100644 --- a/drivers/net/enic/enic_ethdev.c +++ b/drivers/net/enic/enic_ethdev.c @@ -57,15 +57,11 @@ /* * The set of PCI devices this driver supports */ +#define CISCO_PCI_VENDOR_ID 0x1137 static const struct rte_pci_id pci_id_enic_map[] = { -#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#ifndef PCI_VENDOR_ID_CISCO -#define PCI_VENDOR_ID_CISCO 0x1137 -#endif -#include "rte_pci_dev_ids.h" -RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) -RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) -{.vendor_id = 0, /* Sentinal */}, + { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) }, + { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) }, + {.vendor_id = 0, /* sentinel */}, }; static int @@ -436,8 +432,9 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); - device_info->max_rx_queues = enic->rq_count; - device_info->max_tx_queues = enic->wq_count; + /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */ + device_info->max_rx_queues = enic->conf_rq_count / 2; + device_info->max_tx_queues = enic->conf_wq_count; device_info->min_rx_bufsize = ENIC_MIN_MTU; device_info->max_rx_pktlen = enic->rte_dev->data->mtu + ETHER_HDR_LEN + 4; @@ -636,4 +633,5 @@ static struct rte_driver rte_enic_driver = { .init = rte_enic_pmd_init, }; -PMD_REGISTER_DRIVER(rte_enic_driver); +PMD_REGISTER_DRIVER(rte_enic_driver, enic); +DRIVER_REGISTER_PCI_TABLE(enic, pci_id_enic_map); diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index dc831b48..d8669cc0 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -203,7 +203,7 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr) return; } - err = vnic_dev_del_addr(enic->vdev, mac_addr); + err = vnic_dev_del_addr(enic->vdev, enic->mac_addr); if (err) { dev_err(enic, "del mac addr failed\n"); return; @@ -334,6 +334,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold); iowrite32(rq->posted_index, &rq->ctrl->posted_index); + iowrite32(0, &rq->ctrl->fetch_index); rte_rmb(); return 0; @@ -455,6 +456,8 @@ int enic_enable(struct enic *enic) for (index = 0; index < enic->rq_count; index++) enic_start_rq(enic, index); + vnic_dev_add_addr(enic->vdev, enic->mac_addr); + vnic_dev_enable_wait(enic->vdev); /* Register and enable error interrupt */ @@ -971,8 +974,6 @@ int enic_setup_finish(struct enic *enic) return -1; } - vnic_dev_add_addr(enic->vdev, enic->mac_addr); - /* Default conf */ vnic_dev_packet_filter(enic->vdev, 1 /* directed */, @@ -1015,21 +1016,23 @@ int enic_set_vnic_res(struct enic *enic) /* With Rx scatter support, two RQs are now used per RQ used by * the application. */ - if (enic->rq_count < (eth_dev->data->nb_rx_queues * 2)) { + if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) { dev_err(dev, "Not enough Receive queues. 
Requested:%u which uses %d RQs on VIC, Configured:%u\n", eth_dev->data->nb_rx_queues, - eth_dev->data->nb_rx_queues * 2, enic->rq_count); + eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count); rc = -EINVAL; } - if (enic->wq_count < eth_dev->data->nb_tx_queues) { + if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) { dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n", - eth_dev->data->nb_tx_queues, enic->wq_count); + eth_dev->data->nb_tx_queues, enic->conf_wq_count); rc = -EINVAL; } - if (enic->cq_count < (enic->rq_count + enic->wq_count)) { + if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues + + eth_dev->data->nb_tx_queues)) { dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n", - enic->rq_count + enic->wq_count, enic->cq_count); + (eth_dev->data->nb_rx_queues + + eth_dev->data->nb_tx_queues), enic->conf_cq_count); rc = -EINVAL; } diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c index b271d340..84c5d336 100644 --- a/drivers/net/enic/enic_res.c +++ b/drivers/net/enic/enic_res.c @@ -215,14 +215,14 @@ void enic_free_vnic_resources(struct enic *enic) void enic_get_res_counts(struct enic *enic) { - enic->wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); - enic->rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); - enic->cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); - enic->intr_count = vnic_dev_get_res_count(enic->vdev, + enic->conf_wq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ); + enic->conf_rq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_RQ); + enic->conf_cq_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_CQ); + enic->conf_intr_count = vnic_dev_get_res_count(enic->vdev, RES_TYPE_INTR_CTRL); dev_info(enic_get_dev(enic), "vNIC resources avail: wq %d rq %d cq %d intr %d\n", - enic->wq_count, enic->rq_count, - enic->cq_count, enic->intr_count); + enic->conf_wq_count, enic->conf_rq_count, + enic->conf_cq_count, enic->conf_intr_count); } diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c index 5ac1d69c..2f4a08c5 100644 --- a/drivers/net/enic/enic_rxtx.c +++ b/drivers/net/enic/enic_rxtx.c @@ -400,7 +400,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) buf = &wq->bufs[tail_idx]; m = (struct rte_mbuf *)(buf->mb); if (likely(m->pool == pool)) { - ENIC_ASSERT(nb_free < ENIC_MAX_WQ_DESCS); + RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS); free[nb_free++] = m; } else { rte_mempool_put_bulk(pool, (void *)free, nb_free); diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index eb77705e..217853fb 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -3049,9 +3049,9 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev) * and SRIOV-VF devices. 
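Several PMDs in this patch (ena, enic, fm10k, i40e) drop the old rte_pci_dev_ids.h include trick and list their supported devices directly in a sentinel-terminated rte_pci_id table, which the new DRIVER_REGISTER_PCI_TABLE macro then records alongside the driver. Generic probe code stops at the zero-filled last entry; a minimal, self-contained illustration of that convention is below, using a local struct that only mirrors the vendor/device fields rather than the real rte_pci_id, and with the second entry's IDs chosen arbitrarily.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in mirroring just the fields this example needs. */
struct pci_id {
	uint16_t vendor_id;
	uint16_t device_id;
};

static const struct pci_id example_id_table[] = {
	{ 0x1d0f, 0xec21 },	/* Amazon ENA LLQ VF, as listed above */
	{ 0xabcd, 0x0001 },	/* arbitrary example entry */
	{ .vendor_id = 0 },	/* zeroed sentinel terminates the table */
};

int main(void)
{
	const struct pci_id *id;

	/* Probe code walks entries until it reaches the sentinel. */
	for (id = example_id_table; id->vendor_id != 0; id++)
		printf("would match %04x:%04x\n", id->vendor_id, id->device_id);

	return 0;
}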
*/ static const struct rte_pci_id pci_id_fm10k_map[] = { -#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, -#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, -#include "rte_pci_dev_ids.h" + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -3086,4 +3086,5 @@ static struct rte_driver rte_fm10k_driver = { .init = rte_pmd_fm10k_init, }; -PMD_REGISTER_DRIVER(rte_fm10k_driver); +PMD_REGISTER_DRIVER(rte_fm10k_driver, fm10k); +DRIVER_REGISTER_PCI_TABLE(fm10k, pci_id_fm10k_map); diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c index dd92a91e..5b2d04bf 100644 --- a/drivers/net/fm10k/fm10k_rxtx.c +++ b/drivers/net/fm10k/fm10k_rxtx.c @@ -114,10 +114,10 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh); for (count = 0; count < nb_pkts; ++count) { + if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD)) + break; mbuf = q->sw_ring[next_dd]; desc = q->hw_ring[next_dd]; - if (!(desc.d.staterr & FM10K_RXD_STATUS_DD)) - break; #ifdef RTE_LIBRTE_FM10K_DEBUG_RX dump_rxd(&desc); #endif @@ -228,10 +228,10 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh); for (count = 0; count < nb_seg; count++) { + if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD)) + break; mbuf = q->sw_ring[next_dd]; desc = q->hw_ring[next_dd]; - if (!(desc.d.staterr & FM10K_RXD_STATUS_DD)) - break; #ifdef RTE_LIBRTE_FM10K_DEBUG_RX dump_rxd(&desc); #endif diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index f414d938..daac2361 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -440,8 +440,6 @@ static int i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, static int i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id); -static int i40e_get_reg_length(struct rte_eth_dev *dev); - static int i40e_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs); @@ -456,9 +454,28 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); static const struct rte_pci_id pci_id_i40e_map[] = { -#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" -{ .vendor_id = 0, /* sentinel */ }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722) }, + { 
RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_I_X722) }, + { .vendor_id = 0, /* sentinel */ }, }; static const struct eth_dev_ops i40e_eth_dev_ops = { @@ -524,7 +541,6 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .timesync_adjust_time = i40e_timesync_adjust_time, .timesync_read_time = i40e_timesync_read_time, .timesync_write_time = i40e_timesync_write_time, - .get_reg_length = i40e_get_reg_length, .get_reg = i40e_get_regs, .get_eeprom_length = i40e_get_eeprom_length, .get_eeprom = i40e_get_eeprom, @@ -705,7 +721,8 @@ static struct rte_driver rte_i40e_driver = { .init = rte_i40e_pmd_init, }; -PMD_REGISTER_DRIVER(rte_i40e_driver); +PMD_REGISTER_DRIVER(rte_i40e_driver, i40e); +DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map); /* * Initialize registers for flexible payload, which should be set by NVM. @@ -2701,12 +2718,16 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (mask & ETH_VLAN_FILTER_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_filter) + if (dev->data->dev_conf.rxmode.hw_vlan_filter) { + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, false, NULL); i40e_vsi_config_vlan_filter(vsi, TRUE); - else + } else { + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, true, NULL); i40e_vsi_config_vlan_filter(vsi, FALSE); + } } if (mask & ETH_VLAN_STRIP_MASK) { @@ -2952,9 +2973,10 @@ i40e_macaddr_add(struct rte_eth_dev *dev, int ret; /* If VMDQ not enabled or configured, return */ - if (pool != 0 && (!(pf->flags | I40E_FLAG_VMDQ) || !pf->nb_cfg_vmdq_vsi)) { + if (pool != 0 && (!(pf->flags & I40E_FLAG_VMDQ) || + !pf->nb_cfg_vmdq_vsi)) { PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u", - pf->flags | I40E_FLAG_VMDQ ? "configured" : "enabled", + pf->flags & I40E_FLAG_VMDQ ? 
"configured" : "enabled", pool); return; } @@ -3005,7 +3027,7 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) vsi = pf->main_vsi; else { /* No VMDQ pool enabled or configured */ - if (!(pf->flags | I40E_FLAG_VMDQ) || + if (!(pf->flags & I40E_FLAG_VMDQ) || (i > pf->nb_cfg_vmdq_vsi)) { PMD_DRV_LOG(ERR, "No VMDQ pool enabled" "/configured"); @@ -3167,13 +3189,16 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) static int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { - struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); - struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf; + struct i40e_hw *hw; int ret; if (!vsi || !lut) return -EINVAL; + pf = I40E_VSI_TO_PF(vsi); + hw = I40E_VSI_TO_HW(vsi); + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, TRUE, lut, lut_size); @@ -5752,17 +5777,28 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on) { uint32_t vid_idx, vid_bit; + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0}; + int ret; if (vlan_id > ETH_VLAN_ID_MAX) return; vid_idx = I40E_VFTA_IDX(vlan_id); vid_bit = I40E_VFTA_BIT(vlan_id); + vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); - if (on) + if (on) { + ret = i40e_aq_add_vlan(hw, vsi->seid, &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to add vlan filter"); vsi->vfta[vid_idx] |= vid_bit; - else + } else { + ret = i40e_aq_remove_vlan(hw, vsi->seid, &vlan_data, 1, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to remove vlan filter"); vsi->vfta[vid_idx] &= ~vid_bit; + } } /** @@ -6904,6 +6940,9 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, mask &= ~(1UL << i); /* Bit set indicats the coresponding flow type is supported */ g_cfg->valid_bit_mask[0] |= (1UL << i); + /* if flowtype is invalid, continue */ + if (!I40E_VALID_FLOW(i)) + continue; pctype = i40e_flowtype_to_pctype(i); reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) @@ -6975,6 +7014,9 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, if (!(mask0 & (1UL << i))) continue; mask0 &= ~(1UL << i); + /* if flowtype is invalid, continue */ + if (!I40E_VALID_FLOW(i)) + continue; pctype = i40e_flowtype_to_pctype(i); reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? 
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; @@ -7537,13 +7579,11 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, return -EINVAL; } - pctype = i40e_flowtype_to_pctype(conf->flow_type); - if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { - PMD_DRV_LOG(ERR, "Not supported flow type (%u)", - conf->flow_type); + if (!I40E_VALID_FLOW(conf->flow_type)) { + PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } - + pctype = i40e_flowtype_to_pctype(conf->flow_type); ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { @@ -7608,12 +7648,11 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, return -EINVAL; } - pctype = i40e_flowtype_to_pctype(conf->flow_type); - if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) { - PMD_DRV_LOG(ERR, "Not supported flow type (%u)", - conf->flow_type); + if (!I40E_VALID_FLOW(conf->flow_type)) { + PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } + pctype = i40e_flowtype_to_pctype(conf->flow_type); ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { @@ -9342,12 +9381,6 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) return 0; } -static int i40e_get_reg_length(__rte_unused struct rte_eth_dev *dev) -{ - /* Highest base addr + 32-bit word */ - return I40E_GLGEN_STAT_CLEAR + 4; -} - static int i40e_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) { @@ -9356,6 +9389,12 @@ static int i40e_get_regs(struct rte_eth_dev *dev, uint32_t reg_idx, arr_idx, arr_idx2, reg_offset; const struct i40e_reg_info *reg_info; + if (ptr_data == NULL) { + regs->length = I40E_GLGEN_STAT_CLEAR + 4; + regs->width = sizeof(uint32_t); + return 0; + } + /* The first few registers have to be read using AQ operations */ reg_idx = 0; while (i40e_regs_adminq[reg_idx].name) { diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 7b6df1d8..a616ae0b 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1110,9 +1110,12 @@ i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link) } static const struct rte_pci_id pci_id_i40evf_map[] = { -#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" -{ .vendor_id = 0, /* sentinel */ }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) }, + { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) }, + { .vendor_id = 0, /* sentinel */ }, }; static inline int @@ -1581,7 +1584,8 @@ static struct rte_driver rte_i40evf_driver = { .init = rte_i40evf_pmd_init, }; -PMD_REGISTER_DRIVER(rte_i40evf_driver); +PMD_REGISTER_DRIVER(rte_i40evf_driver, i40evf); +DRIVER_REGISTER_PCI_TABLE(i40evf, pci_id_i40evf_map); static int i40evf_dev_configure(struct rte_eth_dev *dev) @@ -2377,13 +2381,16 @@ i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) static int i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { - struct i40e_vf *vf = I40E_VSI_TO_VF(vsi); - struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_vf *vf; + struct i40e_hw *hw; int ret; if (!vsi || !lut) return -EINVAL; + vf = I40E_VSI_TO_VF(vsi); + hw = I40E_VSI_TO_HW(vsi); + if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) { ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE, lut, lut_size); 
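Note: the fm10k, i40e and i40evf hunks above all make the same two changes: each driver now spells out its PCI ID table explicitly instead of generating it from the shared rte_pci_dev_ids.h include, and the registration macros gain a driver name plus an exported PCI table (PMD_REGISTER_DRIVER(drv, name) and DRIVER_REGISTER_PCI_TABLE(name, table)), apparently so that build-time tooling can extract the supported-device list from the compiled PMD. A minimal sketch of the resulting pattern is below; the "mypmd" name, the 0x8086/0x1234 IDs and the init stub are placeholders for illustration and are not part of this patch.

#include <rte_dev.h>
#include <rte_pci.h>

static int
rte_mypmd_init(const char *name, const char *args)
{
	(void)name;
	(void)args;
	/* device probing / ethdev registration would happen here */
	return 0;
}

/* One entry per supported vendor/device pair, closed by a zeroed sentinel. */
static const struct rte_pci_id pci_id_mypmd_map[] = {
	{ RTE_PCI_DEVICE(0x8086, 0x1234) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_driver rte_mypmd_driver = {
	.type = PMD_PDEV,
	.init = rte_mypmd_init,
};

/* The second macro argument is the canonical driver name; the PCI table
 * is exported under that same name. */
PMD_REGISTER_DRIVER(rte_mypmd_driver, mypmd);
DRIVER_REGISTER_PCI_TABLE(mypmd, pci_id_mypmd_map);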
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 0629b426..d478a159 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -538,7 +538,6 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .timesync_disable = ixgbe_timesync_disable, .timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp, .timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp, - .get_reg_length = ixgbe_get_reg_length, .get_reg = ixgbe_get_regs, .get_eeprom_length = ixgbe_get_eeprom_length, .get_eeprom = ixgbe_get_eeprom, @@ -589,7 +588,6 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .rxq_info_get = ixgbe_rxq_info_get, .txq_info_get = ixgbe_txq_info_get, .mac_addr_set = ixgbevf_set_default_mac_addr, - .get_reg_length = ixgbevf_get_reg_length, .get_reg = ixgbevf_get_regs, .reta_update = ixgbe_dev_rss_reta_update, .reta_query = ixgbe_dev_rss_reta_query, @@ -6316,6 +6314,12 @@ ixgbe_get_regs(struct rte_eth_dev *dev, const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ? ixgbe_regs_mac_82598EB : ixgbe_regs_others; + if (data == NULL) { + regs->length = ixgbe_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) { @@ -6340,6 +6344,12 @@ ixgbevf_get_regs(struct rte_eth_dev *dev, int count = 0; const struct reg_info *reg_group; + if (data == NULL) { + regs->length = ixgbevf_get_reg_length(dev); + regs->width = sizeof(uint32_t); + return 0; + } + /* Support only full register dump */ if ((regs->length == 0) || (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) { @@ -7352,5 +7362,7 @@ static struct rte_driver rte_ixgbevf_driver = { .init = rte_ixgbevf_pmd_init, }; -PMD_REGISTER_DRIVER(rte_ixgbe_driver); -PMD_REGISTER_DRIVER(rte_ixgbevf_driver); +PMD_REGISTER_DRIVER(rte_ixgbe_driver, ixgbe); +DRIVER_REGISTER_PCI_TABLE(ixgbe, pci_id_ixgbe_map); +PMD_REGISTER_DRIVER(rte_ixgbevf_driver, ixgbevf); +DRIVER_REGISTER_PCI_TABLE(ixgbevf, pci_id_ixgbevf_map); diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index 4f95debd..1c4fd7c1 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -197,7 +197,9 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags, rx_pkts[3]->ol_flags = vol.e[3]; } #else -#define desc_to_olflags_v(desc, rx_pkts) do {} while (0) +#define desc_to_olflags_v(desc, vlan_flags, rx_pkts) do { \ + RTE_SET_USED(vlan_flags); \ + } while (0) #endif /* diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index f8ed42b8..304c8461 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -689,7 +689,7 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1) return -1; tmp &= keep; - tmp |= flags; + tmp |= (flags & (~keep)); return priv_set_sysfs_ulong(priv, "flags", tmp); } @@ -4328,6 +4328,90 @@ mlx4_dev_close(struct rte_eth_dev *dev) } /** + * Change the link state (UP / DOWN). + * + * @param priv + * Pointer to Ethernet device private data. + * @param up + * Nonzero for link up, otherwise link down. + * + * @return + * 0 on success, errno value on failure. 
+ */ +static int +priv_set_link(struct priv *priv, int up) +{ + struct rte_eth_dev *dev = priv->dev; + int err; + unsigned int i; + + if (up) { + err = priv_set_flags(priv, ~IFF_UP, IFF_UP); + if (err) + return err; + for (i = 0; i < priv->rxqs_n; i++) + if ((*priv->rxqs)[i]->sp) + break; + /* Check if an sp queue exists. + * Note: Some old frames might be received. + */ + if (i == priv->rxqs_n) + dev->rx_pkt_burst = mlx4_rx_burst; + else + dev->rx_pkt_burst = mlx4_rx_burst_sp; + dev->tx_pkt_burst = mlx4_tx_burst; + } else { + err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP); + if (err) + return err; + dev->rx_pkt_burst = removed_rx_burst; + dev->tx_pkt_burst = removed_tx_burst; + } + return 0; +} + +/** + * DPDK callback to bring the link DOWN. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +mlx4_set_link_down(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int err; + + priv_lock(priv); + err = priv_set_link(priv, 0); + priv_unlock(priv); + return err; +} + +/** + * DPDK callback to bring the link UP. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +mlx4_set_link_up(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + int err; + + priv_lock(priv); + err = priv_set_link(priv, 1); + priv_unlock(priv); + return err; +} +/** * DPDK callback to get information about the device. * * @param dev @@ -5134,6 +5218,8 @@ static const struct eth_dev_ops mlx4_dev_ops = { .dev_configure = mlx4_dev_configure, .dev_start = mlx4_dev_start, .dev_stop = mlx4_dev_stop, + .dev_set_link_down = mlx4_set_link_down, + .dev_set_link_up = mlx4_set_link_up, .dev_close = mlx4_dev_close, .promiscuous_enable = mlx4_promiscuous_enable, .promiscuous_disable = mlx4_promiscuous_disable, @@ -5857,8 +5943,8 @@ rte_mlx4_pmd_init(const char *name, const char *args) static struct rte_driver rte_mlx4_driver = { .type = PMD_PDEV, - .name = MLX4_DRIVER_NAME, .init = rte_mlx4_pmd_init, }; -PMD_REGISTER_DRIVER(rte_mlx4_driver) +PMD_REGISTER_DRIVER(rte_mlx4_driver, mlx4); +DRIVER_REGISTER_PCI_TABLE(mlx4, mlx4_pci_id_map); diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 5aa4adc6..d96a9aff 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -758,8 +758,8 @@ rte_mlx5_pmd_init(const char *name, const char *args) static struct rte_driver rte_mlx5_driver = { .type = PMD_PDEV, - .name = MLX5_DRIVER_NAME, .init = rte_mlx5_pmd_init, }; -PMD_REGISTER_DRIVER(rte_mlx5_driver) +PMD_REGISTER_DRIVER(rte_mlx5_driver, mlx5); +DRIVER_REGISTER_PCI_TABLE(mlx5, mlx5_pci_id_map); diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 0e7ed019..130e15d5 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -461,7 +461,7 @@ priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags) if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1) return -1; tmp &= keep; - tmp |= flags; + tmp |= (flags & (~keep)); return priv_set_sysfs_ulong(priv, "flags", tmp); } @@ -1150,7 +1150,7 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) /** * Change the link state (UP / DOWN). * - * @param dev + * @param priv * Pointer to Ethernet device structure. * @param up * Nonzero for link up, otherwise link down. 
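Note: the mlx4 and mlx5 hunks above apply the same fix to priv_set_flags(): with "tmp |= flags" every bit set in the flags argument leaked into the interface flags, so a caller passing ~IFF_UP to force the link down would also switch on every other flag bit. Masking with "(flags & (~keep))" restricts the update to the bits the caller did not ask to preserve, which is what the new priv_set_link()/mlx4_set_link_up()/mlx4_set_link_down() paths rely on. The short standalone check below reproduces that arithmetic on a plain integer; set_flags() and the asserted values are illustrative only and are not part of the patch.

#include <assert.h>
#include <net/if.h>	/* IFF_UP, IFF_BROADCAST, IFF_MULTICAST */

/* Mirrors the corrected body of priv_set_flags() without touching sysfs. */
static unsigned int
set_flags(unsigned int current, unsigned int keep, unsigned int flags)
{
	unsigned int tmp = current & keep;	/* bits the caller wants preserved */

	tmp |= (flags & (~keep));		/* only non-preserved bits may be set */
	return tmp;
}

int
main(void)
{
	unsigned int ifr = IFF_BROADCAST | IFF_MULTICAST;

	/* Link up: priv_set_flags(priv, ~IFF_UP, IFF_UP) adds exactly IFF_UP. */
	ifr = set_flags(ifr, ~IFF_UP, IFF_UP);
	assert(ifr == (IFF_BROADCAST | IFF_MULTICAST | IFF_UP));

	/* Link down: priv_set_flags(priv, ~IFF_UP, ~IFF_UP) clears IFF_UP and,
	 * unlike the old "tmp |= flags", does not turn on every other bit. */
	ifr = set_flags(ifr, ~IFF_UP, ~IFF_UP);
	assert(ifr == (IFF_BROADCAST | IFF_MULTICAST));

	return 0;
}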
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 0c352f3f..615de945 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -1572,7 +1572,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_prefetch0(wqe); rep = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(rep == NULL)) { - while (pkt) { + while (pkt != seg) { + assert(pkt != (*rxq->elts)[idx]); seg = NEXT(pkt); rte_mbuf_refcnt_set(pkt, 0); __rte_mbuf_raw_free(pkt); @@ -1599,6 +1600,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) pkt = seg; assert(len >= (rxq->crc_present << 2)); /* Update packet information. */ + pkt->packet_type = 0; + pkt->ol_flags = 0; if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip | rxq->crc_present) { if (rxq->csum) { diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c index 26e14248..93f87308 100644 --- a/drivers/net/mpipe/mpipe_tilegx.c +++ b/drivers/net/mpipe/mpipe_tilegx.c @@ -1624,19 +1624,17 @@ rte_pmd_mpipe_devinit(const char *ifname, } static struct rte_driver pmd_mpipe_xgbe_drv = { - .name = "xgbe", .type = PMD_VDEV, .init = rte_pmd_mpipe_devinit, }; static struct rte_driver pmd_mpipe_gbe_drv = { - .name = "gbe", .type = PMD_VDEV, .init = rte_pmd_mpipe_devinit, }; -PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv); -PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv); +PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv, xgbe); +PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv, gbe); static void __attribute__((constructor, used)) mpipe_init_contexts(void) diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index 6afd49b1..82e3e4e1 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -2486,7 +2486,8 @@ static struct rte_driver rte_nfp_net_driver = { .init = nfp_net_pmd_init, }; -PMD_REGISTER_DRIVER(rte_nfp_net_driver); +PMD_REGISTER_DRIVER(rte_nfp_net_driver, nfp); +DRIVER_REGISTER_PCI_TABLE(nfp, pci_id_nfp_net_map); /* * Local variables: diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c index ab440f3b..7a248842 100644 --- a/drivers/net/null/rte_eth_null.c +++ b/drivers/net/null/rte_eth_null.c @@ -687,10 +687,12 @@ rte_pmd_null_devuninit(const char *name) } static struct rte_driver pmd_null_drv = { - .name = "eth_null", .type = PMD_VDEV, .init = rte_pmd_null_devinit, .uninit = rte_pmd_null_devuninit, }; -PMD_REGISTER_DRIVER(pmd_null_drv); +PMD_REGISTER_DRIVER(pmd_null_drv, eth_null); +DRIVER_REGISTER_PARAM_STRING(eth_null, + "size=<int> " + "copy=<int>"); diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index c86f17b6..7e213ebb 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -1084,10 +1084,15 @@ rte_pmd_pcap_devuninit(const char *name) } static struct rte_driver pmd_pcap_drv = { - .name = "eth_pcap", .type = PMD_VDEV, .init = rte_pmd_pcap_devinit, .uninit = rte_pmd_pcap_devuninit, }; -PMD_REGISTER_DRIVER(pmd_pcap_drv); +PMD_REGISTER_DRIVER(pmd_pcap_drv, eth_pcap); +DRIVER_REGISTER_PARAM_STRING(eth_pcap, + "rx_pcap=<string> " + "tx_pcap=<string> " + "rx_iface=<ifc> " + "tx_iface=<ifc> " + "iface=<ifc>"); diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index bb531be5..82e44b8f 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -14,6 +14,151 @@ static const struct qed_eth_ops *qed_ops; static const char *drivername = "qede pmd"; static int64_t timer_period = 1; +struct rte_qede_xstats_name_off { + char 
name[RTE_ETH_XSTATS_NAME_SIZE]; + uint64_t offset; +}; + +static const struct rte_qede_xstats_name_off qede_xstats_strings[] = { + {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)}, + {"rx_multicast_bytes", + offsetof(struct ecore_eth_stats, rx_mcast_bytes)}, + {"rx_broadcast_bytes", + offsetof(struct ecore_eth_stats, rx_bcast_bytes)}, + {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)}, + {"rx_multicast_packets", + offsetof(struct ecore_eth_stats, rx_mcast_pkts)}, + {"rx_broadcast_packets", + offsetof(struct ecore_eth_stats, rx_bcast_pkts)}, + + {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)}, + {"tx_multicast_bytes", + offsetof(struct ecore_eth_stats, tx_mcast_bytes)}, + {"tx_broadcast_bytes", + offsetof(struct ecore_eth_stats, tx_bcast_bytes)}, + {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)}, + {"tx_multicast_packets", + offsetof(struct ecore_eth_stats, tx_mcast_pkts)}, + {"tx_broadcast_packets", + offsetof(struct ecore_eth_stats, tx_bcast_pkts)}, + + {"rx_64_byte_packets", + offsetof(struct ecore_eth_stats, rx_64_byte_packets)}, + {"rx_65_to_127_byte_packets", + offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)}, + {"rx_128_to_255_byte_packets", + offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)}, + {"rx_256_to_511_byte_packets", + offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)}, + {"rx_512_to_1023_byte_packets", + offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)}, + {"rx_1024_to_1518_byte_packets", + offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)}, + {"rx_1519_to_1522_byte_packets", + offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)}, + {"rx_1519_to_2047_byte_packets", + offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)}, + {"rx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)}, + {"rx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)}, + {"rx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, + rx_9217_to_16383_byte_packets)}, + {"tx_64_byte_packets", + offsetof(struct ecore_eth_stats, tx_64_byte_packets)}, + {"tx_65_to_127_byte_packets", + offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)}, + {"tx_128_to_255_byte_packets", + offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)}, + {"tx_256_to_511_byte_packets", + offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)}, + {"tx_512_to_1023_byte_packets", + offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)}, + {"tx_1024_to_1518_byte_packets", + offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)}, + {"trx_1519_to_1522_byte_packets", + offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)}, + {"tx_2048_to_4095_byte_packets", + offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)}, + {"tx_4096_to_9216_byte_packets", + offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)}, + {"tx_9217_to_16383_byte_packets", + offsetof(struct ecore_eth_stats, + tx_9217_to_16383_byte_packets)}, + + {"rx_mac_crtl_frames", + offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)}, + {"tx_mac_control_frames", + offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)}, + {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)}, + {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)}, + {"rx_priority_flow_control_frames", + offsetof(struct ecore_eth_stats, rx_pfc_frames)}, + 
{"tx_priority_flow_control_frames", + offsetof(struct ecore_eth_stats, tx_pfc_frames)}, + + {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)}, + {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)}, + {"rx_carrier_errors", + offsetof(struct ecore_eth_stats, rx_carrier_errors)}, + {"rx_oversize_packet_errors", + offsetof(struct ecore_eth_stats, rx_oversize_packets)}, + {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)}, + {"rx_undersize_packet_errors", + offsetof(struct ecore_eth_stats, rx_undersize_packets)}, + {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)}, + {"rx_host_buffer_not_available", + offsetof(struct ecore_eth_stats, no_buff_discards)}, + /* Number of packets discarded because they are bigger than MTU */ + {"rx_packet_too_big_discards", + offsetof(struct ecore_eth_stats, packet_too_big_discard)}, + {"rx_ttl_zero_discards", + offsetof(struct ecore_eth_stats, ttl0_discard)}, + {"rx_multi_function_tag_filter_discards", + offsetof(struct ecore_eth_stats, mftag_filter_discards)}, + {"rx_mac_filter_discards", + offsetof(struct ecore_eth_stats, mac_filter_discards)}, + {"rx_hw_buffer_truncates", + offsetof(struct ecore_eth_stats, brb_truncates)}, + {"rx_hw_buffer_discards", + offsetof(struct ecore_eth_stats, brb_discards)}, + {"tx_lpi_entry_count", + offsetof(struct ecore_eth_stats, tx_lpi_entry_count)}, + {"tx_total_collisions", + offsetof(struct ecore_eth_stats, tx_total_collisions)}, + {"tx_error_drop_packets", + offsetof(struct ecore_eth_stats, tx_err_drop_pkts)}, + + {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)}, + {"rx_mac_unicast_packets", + offsetof(struct ecore_eth_stats, rx_mac_uc_packets)}, + {"rx_mac_multicast_packets", + offsetof(struct ecore_eth_stats, rx_mac_mc_packets)}, + {"rx_mac_broadcast_packets", + offsetof(struct ecore_eth_stats, rx_mac_bc_packets)}, + {"rx_mac_frames_ok", + offsetof(struct ecore_eth_stats, rx_mac_frames_ok)}, + {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)}, + {"tx_mac_unicast_packets", + offsetof(struct ecore_eth_stats, tx_mac_uc_packets)}, + {"tx_mac_multicast_packets", + offsetof(struct ecore_eth_stats, tx_mac_mc_packets)}, + {"tx_mac_broadcast_packets", + offsetof(struct ecore_eth_stats, tx_mac_bc_packets)}, + + {"lro_coalesced_packets", + offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)}, + {"lro_coalesced_events", + offsetof(struct ecore_eth_stats, tpa_coalesced_events)}, + {"lro_aborts_num", + offsetof(struct ecore_eth_stats, tpa_aborts_num)}, + {"lro_not_coalesced_packets", + offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)}, + {"lro_coalesced_bytes", + offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)}, +}; + static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) { ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); @@ -651,15 +796,52 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) stats.tx_mcast_bytes + stats.tx_bcast_bytes; eth_stats->oerrors = stats.tx_err_drop_pkts; +} + +static int +qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit) +{ + unsigned int i, stat_cnt = RTE_DIM(qede_xstats_strings); - DP_INFO(edev, - "no_buff_discards=%" PRIu64 "" - " mac_filter_discards=%" PRIu64 "" - " brb_truncates=%" PRIu64 "" - " brb_discards=%" PRIu64 "\n", - stats.no_buff_discards, - stats.mac_filter_discards, - stats.brb_truncates, stats.brb_discards); + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + 
snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", + qede_xstats_strings[i].name); + + return stat_cnt; +} + +static int +qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct ecore_eth_stats stats; + unsigned int num = RTE_DIM(qede_xstats_strings); + + if (n < num) + return num; + + qdev->ops->get_vport_stats(edev, &stats); + + for (num = 0; num < n; num++) + xstats[num].value = *(u64 *)(((char *)&stats) + + qede_xstats_strings[num].offset); + + return num; +} + +static void +qede_reset_xstats(struct rte_eth_dev *dev) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + ecore_reset_vport_stats(edev); } int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) @@ -976,6 +1158,9 @@ static const struct eth_dev_ops qede_eth_dev_ops = { .dev_close = qede_dev_close, .stats_get = qede_get_stats, .stats_reset = qede_reset_stats, + .xstats_get = qede_get_xstats, + .xstats_reset = qede_reset_xstats, + .xstats_get_names = qede_get_xstats_names, .mac_addr_add = qede_mac_addr_add, .mac_addr_remove = qede_mac_addr_remove, .mac_addr_set = qede_mac_addr_set, @@ -1010,6 +1195,9 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = { .dev_close = qede_dev_close, .stats_get = qede_get_stats, .stats_reset = qede_reset_stats, + .xstats_get = qede_get_xstats, + .xstats_reset = qede_reset_xstats, + .xstats_get_names = qede_get_xstats_names, .vlan_offload_set = qede_vlan_offload_set, .vlan_filter_set = qede_vlan_filter_set, .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, @@ -1340,5 +1528,7 @@ static struct rte_driver rte_qede_driver = { .init = rte_qedevf_pmd_init }; -PMD_REGISTER_DRIVER(rte_qede_driver); -PMD_REGISTER_DRIVER(rte_qedevf_driver); +PMD_REGISTER_DRIVER(rte_qede_driver, qede); +DRIVER_REGISTER_PCI_TABLE(qede, pci_id_qede_map); +PMD_REGISTER_DRIVER(rte_qedevf_driver, qedevf); +DRIVER_REGISTER_PCI_TABLE(qedevf, pci_id_qedevf_map); diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c index b1783c3e..a7048c77 100644 --- a/drivers/net/ring/rte_eth_ring.c +++ b/drivers/net/ring/rte_eth_ring.c @@ -624,10 +624,11 @@ rte_pmd_ring_devuninit(const char *name) } static struct rte_driver pmd_ring_drv = { - .name = "eth_ring", .type = PMD_VDEV, .init = rte_pmd_ring_devinit, .uninit = rte_pmd_ring_devuninit, }; -PMD_REGISTER_DRIVER(pmd_ring_drv); +PMD_REGISTER_DRIVER(pmd_ring_drv, eth_ring); +DRIVER_REGISTER_PARAM_STRING(eth_ring, + "nodeaction=[attach|detach]"); diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c index 985a8d60..483d7894 100644 --- a/drivers/net/szedata2/rte_eth_szedata2.c +++ b/drivers/net/szedata2/rte_eth_szedata2.c @@ -62,7 +62,7 @@ */ #define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8 -#define RTE_SZEDATA2_DRIVER_NAME "rte_szedata2_pmd" +#define RTE_SZEDATA2_DRIVER_NAME rte_szedata2_pmd #define RTE_SZEDATA2_PCI_DRIVER_NAME "rte_szedata2_pmd" #define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u" @@ -1596,9 +1596,9 @@ rte_szedata2_uninit(const char *name __rte_unused) static struct rte_driver rte_szedata2_driver = { .type = PMD_PDEV, - .name = RTE_SZEDATA2_DRIVER_NAME, .init = rte_szedata2_init, .uninit = rte_szedata2_uninit, }; -PMD_REGISTER_DRIVER(rte_szedata2_driver); +PMD_REGISTER_DRIVER(rte_szedata2_driver, RTE_SZEDATA2_DRIVER_NAME); +DRIVER_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, 
rte_szedata2_pci_id_table); diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h index 88ecd175..2f2b2259 100644 --- a/drivers/net/thunderx/base/nicvf_hw_defs.h +++ b/drivers/net/thunderx/base/nicvf_hw_defs.h @@ -164,6 +164,7 @@ #define RBDR_QUEUE_SZ_128K (128 * 1024) #define RBDR_QUEUE_SZ_256K (256 * 1024) #define RBDR_QUEUE_SZ_512K (512 * 1024) +#define RBDR_QUEUE_SZ_MAX RBDR_QUEUE_SZ_512K #define RBDR_SIZE_SHIFT (13) /* 8k */ @@ -174,6 +175,7 @@ #define SND_QUEUE_SZ_16K (16 * 1024) #define SND_QUEUE_SZ_32K (32 * 1024) #define SND_QUEUE_SZ_64K (64 * 1024) +#define SND_QUEUE_SZ_MAX SND_QUEUE_SZ_64K #define SND_QSIZE_SHIFT (10) /* 1k */ @@ -184,6 +186,7 @@ #define CMP_QUEUE_SZ_16K (16 * 1024) #define CMP_QUEUE_SZ_32K (32 * 1024) #define CMP_QUEUE_SZ_64K (64 * 1024) +#define CMP_QUEUE_SZ_MAX CMP_QUEUE_SZ_64K #define CMP_QSIZE_SHIFT (10) /* 1k */ diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c index 48ed3812..4f875c02 100644 --- a/drivers/net/thunderx/nicvf_ethdev.c +++ b/drivers/net/thunderx/nicvf_ethdev.c @@ -189,19 +189,16 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) } static int -nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused) -{ - return nicvf_reg_get_count(); -} - -static int nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) { uint64_t *data = regs->data; struct nicvf *nic = nicvf_pmd_priv(dev); - if (data == NULL) - return -EINVAL; + if (data == NULL) { + regs->length = nicvf_reg_get_count(); + regs->width = THUNDERX_REG_BYTES; + return 0; + } /* Support only full register dump */ if ((regs->length == 0) || @@ -495,7 +492,7 @@ nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt) { const struct rte_memzone *rz; - uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t); + uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t); rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size, NICVF_CQ_BASE_ALIGN_BYTES, nic->node); @@ -518,7 +515,7 @@ nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt) { const struct rte_memzone *rz; - uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t); + uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t); rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size, NICVF_SQ_BASE_ALIGN_BYTES, nic->node); @@ -551,7 +548,7 @@ nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz) return -ENOMEM; } - ring_size = sizeof(struct rbdr_entry_t) * desc_cnt; + ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX; rz = rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size, NICVF_RBDR_BASE_ALIGN_BYTES, nic->node); if (rz == NULL) { @@ -1623,7 +1620,6 @@ static const struct eth_dev_ops nicvf_eth_dev_ops = { .rx_queue_count = nicvf_dev_rx_queue_count, .tx_queue_setup = nicvf_dev_tx_queue_setup, .tx_queue_release = nicvf_dev_tx_queue_release, - .get_reg_length = nicvf_dev_get_reg_length, .get_reg = nicvf_dev_get_regs, }; @@ -1783,9 +1779,9 @@ rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused) } static struct rte_driver rte_nicvf_driver = { - .name = "nicvf_driver", .type = PMD_PDEV, .init = rte_nicvf_pmd_init, }; -PMD_REGISTER_DRIVER(rte_nicvf_driver); +PMD_REGISTER_DRIVER(rte_nicvf_driver, thunderx_nicvf); +DRIVER_REGISTER_PCI_TABLE(thunderx_nicvf, pci_id_nicvf_map); diff --git a/drivers/net/thunderx/nicvf_ethdev.h 
b/drivers/net/thunderx/nicvf_ethdev.h index 59fa19cf..34447e05 100644 --- a/drivers/net/thunderx/nicvf_ethdev.h +++ b/drivers/net/thunderx/nicvf_ethdev.h @@ -36,6 +36,7 @@ #include <rte_ethdev.h> #define THUNDERX_NICVF_PMD_VERSION "1.0" +#define THUNDERX_REG_BYTES 8 #define NICVF_INTR_POLL_INTERVAL_MS 50 #define NICVF_HALF_DUPLEX 0x00 diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 3b509465..7539cd49 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -303,6 +303,7 @@ destroy_device(int vid) struct internal_list *list; char ifname[PATH_MAX]; unsigned i; + struct rte_vhost_vring_state *state; rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); list = find_internal_resource(ifname); @@ -345,6 +346,15 @@ destroy_device(int vid) vq->vid = -1; } + state = vring_states[eth_dev->data->port_id]; + rte_spinlock_lock(&state->lock); + for (i = 0; i <= state->max_vring; i++) { + state->cur[i] = false; + state->seen[i] = false; + } + state->max_vring = 0; + rte_spinlock_unlock(&state->lock); + RTE_LOG(INFO, PMD, "Connection closed\n"); _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC); @@ -915,10 +925,12 @@ rte_pmd_vhost_devuninit(const char *name) } static struct rte_driver pmd_vhost_drv = { - .name = "eth_vhost", .type = PMD_VDEV, .init = rte_pmd_vhost_devinit, .uninit = rte_pmd_vhost_devuninit, }; -PMD_REGISTER_DRIVER(pmd_vhost_drv); +PMD_REGISTER_DRIVER(pmd_vhost_drv, eth_vhost); +DRIVER_REGISTER_PARAM_STRING(eth_vhost, + "iface=<ifc> " + "queues=<int>"); diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index 480daa37..850e3ba5 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -103,11 +103,8 @@ static int virtio_dev_queue_stats_mapping_set( * The set of PCI devices this driver supports */ static const struct rte_pci_id pci_id_virtio_map[] = { - -#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" - -{ .vendor_id = 0, /* sentinel */ }, + { RTE_PCI_DEVICE(VIRTIO_PCI_VENDORID, VIRTIO_PCI_DEVICEID_MIN) }, + { .vendor_id = 0, /* sentinel */ }, }; struct rte_virtio_xstats_name_off { @@ -166,7 +163,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, ctrl->status = status; - if (!cvq && !cvq->vq) { + if (!cvq || !cvq->vq) { PMD_INIT_LOG(ERR, "Control queue is not supported."); return -1; } @@ -1571,4 +1568,5 @@ static struct rte_driver rte_virtio_driver = { .init = rte_virtio_pmd_init, }; -PMD_REGISTER_DRIVER(rte_virtio_driver); +PMD_REGISTER_DRIVER(rte_virtio_driver, virtio_net); +DRIVER_REGISTER_PCI_TABLE(virtio_net, pci_id_virtio_map); diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c index 242ad90d..d8fcc15e 100644 --- a/drivers/net/virtio/virtio_rxtx_simple.c +++ b/drivers/net/virtio/virtio_rxtx_simple.c @@ -301,7 +301,7 @@ static inline void virtio_xmit_cleanup(struct virtqueue *vq) { uint16_t i, desc_idx; - int nb_free = 0; + uint32_t nb_free = 0; struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ]; desc_idx = (uint16_t)(vq->vq_used_cons_idx & @@ -319,13 +319,16 @@ virtio_xmit_cleanup(struct virtqueue *vq) free[nb_free++] = m; else { rte_mempool_put_bulk(free[0]->pool, - (void **)free, nb_free); + (void **)free, + RTE_MIN(RTE_DIM(free), + nb_free)); free[0] = m; nb_free = 1; } } } - rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + rte_mempool_put_bulk(free[0]->pool, (void **)free, + 
RTE_MIN(RTE_DIM(free), nb_free)); } else { for (i = 1; i < VIRTIO_TX_FREE_NR; i++) { m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie; diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c index a2b0687f..082e8217 100644 --- a/drivers/net/virtio/virtio_user/vhost_user.c +++ b/drivers/net/virtio/virtio_user/vhost_user.c @@ -181,7 +181,7 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max) } huges[idx].addr = v_start; huges[idx].size = v_end - v_start; - strcpy(huges[idx].path, tmp); + snprintf(huges[idx].path, PATH_MAX, "%s", tmp); idx++; } @@ -392,7 +392,8 @@ vhost_user_setup(const char *path) } flag = fcntl(fd, F_GETFD); - fcntl(fd, F_SETFD, flag | FD_CLOEXEC); + if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0) + PMD_DRV_LOG(WARNING, "fcntl failed, %s", strerror(errno)); memset(&un, 0, sizeof(un)); un.sun_family = AF_UNIX; diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c index 3d12a320..376c9cf5 100644 --- a/drivers/net/virtio/virtio_user/virtio_user_dev.c +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -63,12 +63,12 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) /* May use invalid flag, but some backend leverages kickfd and callfd as * criteria to judge if dev is alive. so finally we use real event_fd. */ - callfd = eventfd(0, O_CLOEXEC | O_NONBLOCK); + callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); if (callfd < 0) { PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno)); return -1; } - kickfd = eventfd(0, O_CLOEXEC | O_NONBLOCK); + kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK); if (kickfd < 0) { close(callfd); PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno)); @@ -181,7 +181,7 @@ int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, int cq, int queue_size, const char *mac) { - strncpy(dev->path, path, PATH_MAX); + snprintf(dev->path, PATH_MAX, "%s", path); dev->max_queue_pairs = queues; dev->queue_pairs = 1; /* mq disabled by default */ dev->queue_size = queue_size; diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c index 5ab24711..782d7d38 100644 --- a/drivers/net/virtio/virtio_user_ethdev.c +++ b/drivers/net/virtio/virtio_user_ethdev.c @@ -320,7 +320,7 @@ virtio_user_eth_dev_alloc(const char *name) static int virtio_user_pmd_devinit(const char *name, const char *params) { - struct rte_kvargs *kvlist; + struct rte_kvargs *kvlist = NULL; struct rte_eth_dev *eth_dev; struct virtio_hw *hw; uint64_t queues = VIRTIO_USER_DEF_Q_NUM; @@ -343,31 +343,60 @@ virtio_user_pmd_devinit(const char *name, const char *params) } if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) - rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH, - &get_string_arg, &path); + ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH, + &get_string_arg, &path); + if (ret < 0) { + PMD_INIT_LOG(ERR, "error to parse %s", + VIRTIO_USER_ARG_PATH); + goto end; + } else { PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio-user\n", VIRTIO_USER_ARG_QUEUE_SIZE); goto end; } - if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) - rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC, - &get_string_arg, &mac_addr); + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) { + ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC, + &get_string_arg, &mac_addr); + if (ret < 0) { + PMD_INIT_LOG(ERR, "error to parse %s", + VIRTIO_USER_ARG_MAC); + goto end; + } + } - if (rte_kvargs_count(kvlist, 
VIRTIO_USER_ARG_QUEUE_SIZE) == 1) - rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE, - &get_integer_arg, &queue_size); + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) { + ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE, + &get_integer_arg, &queue_size); + if (ret < 0) { + PMD_INIT_LOG(ERR, "error to parse %s", + VIRTIO_USER_ARG_QUEUE_SIZE); + goto end; + } + } - if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) - rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM, - &get_integer_arg, &queues); + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) { + ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM, + &get_integer_arg, &queues); + if (ret < 0) { + PMD_INIT_LOG(ERR, "error to parse %s", + VIRTIO_USER_ARG_QUEUES_NUM); + goto end; + } + } - if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) - rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, - &get_integer_arg, &cq); - else if (queues > 1) + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) { + ret = rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, + &get_integer_arg, &cq); + if (ret < 0) { + PMD_INIT_LOG(ERR, "error to parse %s", + VIRTIO_USER_ARG_CQ_NUM); + goto end; + } + } else if (queues > 1) { cq = 1; + } if (queues > 1 && cq == 0) { PMD_INIT_LOG(ERR, "multi-q requires ctrl-q"); @@ -393,6 +422,8 @@ virtio_user_pmd_devinit(const char *name, const char *params) ret = 0; end: + if (kvlist) + rte_kvargs_free(kvlist); if (path) free(path); if (mac_addr) @@ -431,10 +462,15 @@ virtio_user_pmd_devuninit(const char *name) } static struct rte_driver virtio_user_driver = { - .name = "virtio-user", .type = PMD_VDEV, .init = virtio_user_pmd_devinit, .uninit = virtio_user_pmd_devuninit, }; -PMD_REGISTER_DRIVER(virtio_user_driver); +PMD_REGISTER_DRIVER(virtio_user_driver, virtio_user); +DRIVER_REGISTER_PARAM_STRING(virtio_user, + "path=<path> " + "mac=<mac addr> " + "cq=<int> " + "queue_size=<int> " + "queues=<int>"); diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index 29b469cc..58742153 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -100,12 +100,11 @@ static void vmxnet3_process_events(struct vmxnet3_hw *); /* * The set of PCI devices this driver supports */ +#define VMWARE_PCI_VENDOR_ID 0x15AD +#define VMWARE_DEV_ID_VMXNET3 0x07B0 static const struct rte_pci_id pci_id_vmxnet3_map[] = { - -#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, -#include "rte_pci_dev_ids.h" - -{ .vendor_id = 0, /* sentinel */ }, + { RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) }, + { .vendor_id = 0, /* sentinel */ }, }; static const struct eth_dev_ops vmxnet3_eth_dev_ops = { @@ -954,4 +953,5 @@ static struct rte_driver rte_vmxnet3_driver = { .init = rte_vmxnet3_pmd_init, }; -PMD_REGISTER_DRIVER(rte_vmxnet3_driver); +PMD_REGISTER_DRIVER(rte_vmxnet3_driver, vmxnet3); +DRIVER_REGISTER_PCI_TABLE(vmxnet3, pci_id_vmxnet3_map); diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c index 3e45808f..99f6cc81 100644 --- a/drivers/net/xenvirt/rte_eth_xenvirt.c +++ b/drivers/net/xenvirt/rte_eth_xenvirt.c @@ -760,10 +760,11 @@ rte_pmd_xenvirt_devuninit(const char *name) } static struct rte_driver pmd_xenvirt_drv = { - .name = "eth_xenvirt", .type = PMD_VDEV, .init = rte_pmd_xenvirt_devinit, .uninit = rte_pmd_xenvirt_devuninit, }; -PMD_REGISTER_DRIVER(pmd_xenvirt_drv); +PMD_REGISTER_DRIVER(pmd_xenvirt_drv, eth_xenvirt); 
+DRIVER_REGISTER_PARAM_STRING(eth_xenvirt, + "mac=<mac addr>"); diff --git a/examples/Makefile b/examples/Makefile index f650d3ec..18b41b90 100644 --- a/examples/Makefile +++ b/examples/Makefile @@ -54,7 +54,7 @@ endif ifeq ($(CONFIG_RTE_LIBRTE_ACL)$(CONFIG_RTE_LIBRTE_HASH)$(CONFIG_RTE_LIBRTE_LPM),yyy) DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += ipsec-secgw endif -DIRS-y += ipv4_multicast +DIRS-$(CONFIG_RTE_LIBRTE_HASH) += ipv4_multicast DIRS-$(CONFIG_RTE_LIBRTE_KNI) += kni DIRS-y += l2fwd ifneq ($(PQOS_INSTALL_PATH),) @@ -65,9 +65,11 @@ DIRS-$(CONFIG_RTE_LIBRTE_IVSHMEM) += l2fwd-ivshmem DIRS-$(CONFIG_RTE_LIBRTE_JOBSTATS) += l2fwd-jobstats DIRS-y += l2fwd-keepalive DIRS-y += l2fwd-keepalive/ka-agent +ifeq ($(CONFIG_RTE_LIBRTE_HASH),y) DIRS-$(CONFIG_RTE_LIBRTE_LPM) += l3fwd +endif DIRS-$(CONFIG_RTE_LIBRTE_ACL) += l3fwd-acl -ifeq ($(CONFIG_RTE_LIBRTE_LPM),y) +ifeq ($(CONFIG_RTE_LIBRTE_LPM)$(CONFIG_RTE_LIBRTE_HASH),yy) DIRS-$(CONFIG_RTE_LIBRTE_POWER) += l3fwd-power DIRS-y += l3fwd-vf endif @@ -82,7 +84,9 @@ DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched DIRS-y += quota_watermark DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks DIRS-y += skeleton +ifeq ($(CONFIG_RTE_LIBRTE_HASH),y) DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination +endif DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost DIRS-$(CONFIG_RTE_LIBRTE_XEN_DOM0) += vhost_xen diff --git a/examples/bond/main.c b/examples/bond/main.c index 53bd0441..776fad0a 100644 --- a/examples/bond/main.c +++ b/examples/bond/main.c @@ -590,10 +590,14 @@ static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result, return; } global_flag_stru_p->LcoreMainIsRunning = 0; - rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore); - cmdline_printf(cl, - "lcore_main stopped on core:%d\n", - global_flag_stru_p->LcoreMainCore); + if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0) + cmdline_printf(cl, + "error: lcore_main can not stop on core:%d\n", + global_flag_stru_p->LcoreMainCore); + else + cmdline_printf(cl, + "lcore_main stopped on core:%d\n", + global_flag_stru_p->LcoreMainCore); rte_spinlock_unlock(&global_flag_stru_p->lock); } @@ -628,10 +632,14 @@ static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result, return; } global_flag_stru_p->LcoreMainIsRunning = 0; - rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore); - cmdline_printf(cl, - "lcore_main stopped on core:%d\n", - global_flag_stru_p->LcoreMainCore); + if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0) + cmdline_printf(cl, + "error: lcore_main can not stop on core:%d\n", + global_flag_stru_p->LcoreMainCore); + else + cmdline_printf(cl, + "lcore_main stopped on core:%d\n", + global_flag_stru_p->LcoreMainCore); rte_spinlock_unlock(&global_flag_stru_p->lock); cmdline_quit(cl); } diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c index 54391f21..a1f91d45 100644 --- a/examples/ethtool/lib/rte_ethtool.c +++ b/examples/ethtool/lib/rte_ethtool.c @@ -46,6 +46,7 @@ int rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo) { struct rte_eth_dev_info dev_info; + struct rte_dev_reg_info reg_info; int n; if (drvinfo == NULL) @@ -65,7 +66,9 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo) dev_info.pci_dev->addr.domain, dev_info.pci_dev->addr.bus, dev_info.pci_dev->addr.devid, dev_info.pci_dev->addr.function); - n = rte_eth_dev_get_reg_length(port_id); + memset(®_info, 0, sizeof(reg_info)); + rte_eth_dev_get_reg_info(port_id, ®_info); + n = 
reg_info.length; if (n > 0) drvinfo->regdump_len = n; else @@ -86,12 +89,16 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo) int rte_ethtool_get_regs_len(uint8_t port_id) { - int count_regs; + struct rte_dev_reg_info reg_info; + int ret; + + memset(®_info, 0, sizeof(reg_info)); + + ret = rte_eth_dev_get_reg_info(port_id, ®_info); + if (ret) + return ret; - count_regs = rte_eth_dev_get_reg_length(port_id); - if (count_regs > 0) - return count_regs * sizeof(uint32_t); - return count_regs; + return reg_info.length * reg_info.width; } int diff --git a/examples/ip_pipeline/config/diagram-generator.py b/examples/ip_pipeline/config/diagram-generator.py new file mode 100755 index 00000000..f20cbcbb --- /dev/null +++ b/examples/ip_pipeline/config/diagram-generator.py @@ -0,0 +1,343 @@ +#! /usr/bin/python2 + +# BSD LICENSE +# +# Copyright(c) 2016 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# +# This script creates a visual representation for a configuration file used by +# the DPDK ip_pipeline application. +# +# The input configuration file is translated to an output file in DOT syntax, +# which is then used to create the image file using graphviz (www.graphviz.org). 
+# + +from __future__ import print_function +import argparse +import re +import os + +# +# Command to generate the image file +# +DOT_COMMAND = 'dot -Gsize=20,30 -Tpng %s > %s' + +# +# Layout of generated DOT file +# +DOT_INTRO = \ + '#\n# Command to generate image file:\n# \t%s\n#\n\n' +DOT_GRAPH_BEGIN = \ + 'digraph g {\n graph [ splines = true rankdir = "LR" ]\n' +DOT_NODE_LINK_RX = \ + ' "%s RX" [ shape = box style = filled fillcolor = yellowgreen ]\n' +DOT_NODE_LINK_TX = \ + ' "%s TX" [ shape = box style = filled fillcolor = yellowgreen ]\n' +DOT_NODE_KNI_RX = \ + ' "%s RX" [ shape = box style = filled fillcolor = orange ]\n' +DOT_NODE_KNI_TX = \ + ' "%s TX" [ shape = box style = filled fillcolor = orange ]\n' +DOT_NODE_TAP_RX = \ + ' "%s RX" [ shape = box style = filled fillcolor = gold ]\n' +DOT_NODE_TAP_TX = \ + ' "%s TX" [ shape = box style = filled fillcolor = gold ]\n' +DOT_NODE_SOURCE = \ + ' "%s" [ shape = box style = filled fillcolor = darkgreen ]\n' +DOT_NODE_SINK = \ + ' "%s" [ shape = box style = filled fillcolor = peachpuff ]\n' +DOT_NODE_PIPELINE = \ + ' "%s" [ shape = box style = filled fillcolor = royalblue ]\n' +DOT_EDGE_PKTQ = \ + ' "%s" -> "%s" [ label = "%s" color = gray ]\n' +DOT_GRAPH_END = \ + '}\n' + +# Relationships between the graph nodes and the graph edges: +# +# Edge ID | Edge Label | Writer Node | Reader Node | Dependencies +# --------+------------+-------------+---------------+-------------- +# RXQx.y | RXQx.y | LINKx | PIPELINEz | LINKx +# TXQx.y | TXQx.y | PIPELINEz | LINKx | LINKx +# SWQx | SWQx | PIPELINEy | PIPELINEz | - +# TMx | TMx | PIPELINEy | PIPELINEz | LINKx +# KNIx RX | KNIx | KNIx RX | PIPELINEy | KNIx, LINKx +# KNIx TX | KNIx | PIPELINEy | KNIx TX | KNIx, LINKx +# TAPx RX | TAPx | TAPx RX | PIPELINEy | TAPx +# TAPx TX | TAPx | PIPELINEy | TAPx TX | TAPx +# SOURCEx | SOURCEx | SOURCEx | PIPELINEy | SOURCEx +# SINKx | SINKx | PIPELINEy | SINKx | SINKx + +# +# Parse the input configuration file to detect the graph nodes and edges +# +def process_config_file(cfgfile): + edges = {} + links = set() + knis = set() + taps = set() + sources = set() + sinks = set() + pipelines = set() + pipeline = '' + + dotfile = cfgfile + '.txt' + imgfile = cfgfile + '.png' + + # + # Read configuration file + # + lines = open(cfgfile, 'r') + for line in lines: + # Remove any leading and trailing white space characters + line = line.strip() + + # Remove any comment at end of line + line, sep, tail = line.partition(';') + + # Look for next "PIPELINE" section + match = re.search(r'\[(PIPELINE\d+)\]', line) + if match: + pipeline = match.group(1) + continue + + # Look for next "pktq_in" section entry + match = re.search(r'pktq_in\s*=\s*(.+)', line) + if match: + pipelines.add(pipeline) + for q in re.findall('\S+', match.group(1)): + match_rxq = re.search(r'^RXQ(\d+)\.\d+$', q) + match_swq = re.search(r'^SWQ\d+$', q) + match_tm = re.search(r'^TM(\d+)$', q) + match_kni = re.search(r'^KNI(\d+)$', q) + match_tap = re.search(r'^TAP\d+$', q) + match_source = re.search(r'^SOURCE\d+$', q) + + # Set ID for the current packet queue (graph edge) + q_id = '' + if match_rxq or match_swq or match_tm or match_source: + q_id = q + elif match_kni or match_tap: + q_id = q + ' RX' + else: + print('Error: Unrecognized pktq_in element "%s"' % q) + return + + # Add current packet queue to the set of graph edges + if q_id not in edges: + edges[q_id] = {} + if 'label' not in edges[q_id]: + edges[q_id]['label'] = q + if 'readers' not in edges[q_id]: + edges[q_id]['readers'] = [] + if 
'writers' not in edges[q_id]: + edges[q_id]['writers'] = [] + + # Add reader for the new edge + edges[q_id]['readers'].append(pipeline) + + # Check for RXQ + if match_rxq: + link = 'LINK' + str(match_rxq.group(1)) + edges[q_id]['writers'].append(link + ' RX') + links.add(link) + continue + + # Check for SWQ + if match_swq: + continue + + # Check for TM + if match_tm: + link = 'LINK' + str(match_tm.group(1)) + links.add(link) + continue + + # Check for KNI + if match_kni: + link = 'LINK' + str(match_kni.group(1)) + edges[q_id]['writers'].append(q_id) + knis.add(q) + links.add(link) + continue + + # Check for TAP + if match_tap: + edges[q_id]['writers'].append(q_id) + taps.add(q) + continue + + # Check for SOURCE + if match_source: + edges[q_id]['writers'].append(q) + sources.add(q) + continue + + continue + + # Look for next "pktq_out" section entry + match = re.search(r'pktq_out\s*=\s*(.+)', line) + if match: + for q in re.findall('\S+', match.group(1)): + match_txq = re.search(r'^TXQ(\d+)\.\d+$', q) + match_swq = re.search(r'^SWQ\d+$', q) + match_tm = re.search(r'^TM(\d+)$', q) + match_kni = re.search(r'^KNI(\d+)$', q) + match_tap = re.search(r'^TAP(\d+)$', q) + match_sink = re.search(r'^SINK(\d+)$', q) + + # Set ID for the current packet queue (graph edge) + q_id = '' + if match_txq or match_swq or match_tm or match_sink: + q_id = q + elif match_kni or match_tap: + q_id = q + ' TX' + else: + print('Error: Unrecognized pktq_out element "%s"' % q) + return + + # Add current packet queue to the set of graph edges + if q_id not in edges: + edges[q_id] = {} + if 'label' not in edges[q_id]: + edges[q_id]['label'] = q + if 'readers' not in edges[q_id]: + edges[q_id]['readers'] = [] + if 'writers' not in edges[q_id]: + edges[q_id]['writers'] = [] + + # Add writer for the new edge + edges[q_id]['writers'].append(pipeline) + + # Check for TXQ + if match_txq: + link = 'LINK' + str(match_txq.group(1)) + edges[q_id]['readers'].append(link + ' TX') + links.add(link) + continue + + # Check for SWQ + if match_swq: + continue + + # Check for TM + if match_tm: + link = 'LINK' + str(match_tm.group(1)) + links.add(link) + continue + + # Check for KNI + if match_kni: + link = 'LINK' + str(match_kni.group(1)) + edges[q_id]['readers'].append(q_id) + knis.add(q) + links.add(link) + continue + + # Check for TAP + if match_tap: + edges[q_id]['readers'].append(q_id) + taps.add(q) + continue + + # Check for SINK + if match_sink: + edges[q_id]['readers'].append(q) + sinks.add(q) + continue + + continue + + # + # Write DOT file + # + print('Creating DOT file "%s" ...' 
% dotfile) + dot_cmd = DOT_COMMAND % (dotfile, imgfile) + file = open(dotfile, 'w') + file.write(DOT_INTRO % dot_cmd) + file.write(DOT_GRAPH_BEGIN) + + # Write the graph nodes to the DOT file + for l in sorted(links): + file.write(DOT_NODE_LINK_RX % l) + file.write(DOT_NODE_LINK_TX % l) + for k in sorted(knis): + file.write(DOT_NODE_KNI_RX % k) + file.write(DOT_NODE_KNI_TX % k) + for t in sorted(taps): + file.write(DOT_NODE_TAP_RX % t) + file.write(DOT_NODE_TAP_TX % t) + for s in sorted(sources): + file.write(DOT_NODE_SOURCE % s) + for s in sorted(sinks): + file.write(DOT_NODE_SINK % s) + for p in sorted(pipelines): + file.write(DOT_NODE_PIPELINE % p) + + # Write the graph edges to the DOT file + for q in sorted(edges.keys()): + rw = edges[q] + if 'writers' not in rw: + print('Error: "%s" has no writer' % q) + return + if 'readers' not in rw: + print('Error: "%s" has no reader' % q) + return + for w in rw['writers']: + for r in rw['readers']: + file.write(DOT_EDGE_PKTQ % (w, r, rw['label'])) + + file.write(DOT_GRAPH_END) + file.close() + + # + # Execute the DOT command to create the image file + # + print('Creating image file "%s" ...' % imgfile) + if os.system('which dot > /dev/null'): + print('Error: Unable to locate "dot" executable.' \ + 'Please install the "graphviz" package (www.graphviz.org).') + return + + os.system(dot_cmd) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description=\ + 'Create diagram for IP pipeline configuration file.') + + parser.add_argument( + '-f', + '--file', + help='input configuration file (e.g. "ip_pipeline.cfg")', + required=True) + + args = parser.parse_args() + + process_config_file(args.file) diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c index 8dc616d4..a1ce7127 100644 --- a/examples/l2fwd-crypto/main.c +++ b/examples/l2fwd-crypto/main.c @@ -243,7 +243,7 @@ struct l2fwd_crypto_statistics { } __rte_cache_aligned; struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS]; -struct l2fwd_crypto_statistics crypto_statistics[RTE_MAX_ETHPORTS]; +struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS]; /* A tsc-based timer responsible for triggering statistics printout */ #define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */ @@ -628,7 +628,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options) unsigned lcore_id = rte_lcore_id(); uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; - unsigned i, j, portid, nb_rx; + unsigned i, j, portid, nb_rx, len; struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id]; const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US; @@ -727,10 +727,18 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options) cur_tsc = rte_rdtsc(); /* - * TX burst queue drain + * Crypto device/TX burst queue drain */ diff_tsc = cur_tsc - prev_tsc; if (unlikely(diff_tsc > drain_tsc)) { + /* Enqueue all crypto ops remaining in buffers */ + for (i = 0; i < qconf->nb_crypto_devs; i++) { + cparams = &port_cparams[i]; + len = qconf->op_buf[cparams->dev_id].len; + l2fwd_crypto_send_burst(qconf, len, cparams); + qconf->op_buf[cparams->dev_id].len = 0; + } + /* Transmit all packets remaining in buffers */ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { if (qconf->pkt_buf[portid].len == 0) continue; diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index 7a79cd2c..acedd20e 100644 --- a/examples/l3fwd/main.c +++ b/examples/l3fwd/main.c @@ -311,20 +311,32 @@ init_lcore_rx_queues(void) static void print_usage(const char 
*prgname) { - printf ("%s [EAL options] -- -p PORTMASK -P" - " [--config (port,queue,lcore)[,(port,queue,lcore]]" - " [--enable-jumbo [--max-pkt-len PKTLEN]]\n" - " -p PORTMASK: hexadecimal bitmask of ports to configure\n" - " -P : enable promiscuous mode\n" - " -E : enable exact match\n" - " -L : enable longest prefix match\n" - " --config (port,queue,lcore): rx queues configuration\n" - " --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n" - " --no-numa: optional, disable numa awareness\n" - " --ipv6: optional, specify it if running ipv6 packets\n" - " --enable-jumbo: enable jumbo frame" - " which max packet len is PKTLEN in decimal (64-9600)\n" - " --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n", + printf("%s [EAL options] --" + " -p PORTMASK" + " [-P]" + " [-E]" + " [-L]" + " --config (port,queue,lcore)[,(port,queue,lcore)]" + " [--eth-dest=X,MM:MM:MM:MM:MM:MM]" + " [--enable-jumbo [--max-pkt-len PKTLEN]]" + " [--no-numa]" + " [--hash-entry-num]" + " [--ipv6]" + " [--parse-ptype]\n\n" + + " -p PORTMASK: Hexadecimal bitmask of ports to configure\n" + " -P : Enable promiscuous mode\n" + " -E : Enable exact match\n" + " -L : Enable longest prefix match (default)\n" + " --config (port,queue,lcore): Rx queue configuration\n" + " --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n" + " --enable-jumbo: Enable jumbo frames\n" + " --max-pkt-len: Under the premise of enabling jumbo,\n" + " maximum packet length in decimal (64-9600)\n" + " --no-numa: Disable numa awareness\n" + " --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n" + " --ipv6: Set if running ipv6 packets\n" + " --parse-ptype: Set to use software to analyze packet type\n\n", prgname); } diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c index ecb61c68..ad941a7a 100644 --- a/examples/multi_process/client_server_mp/mp_server/init.c +++ b/examples/multi_process/client_server_mp/mp_server/init.c @@ -60,7 +60,6 @@ #include <rte_ether.h> #include <rte_ethdev.h> #include <rte_malloc.h> -#include <rte_fbk_hash.h> #include <rte_string_fns.h> #include <rte_cycles.h> diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c index de54c674..a6dc12d5 100644 --- a/examples/multi_process/client_server_mp/mp_server/main.c +++ b/examples/multi_process/client_server_mp/mp_server/main.c @@ -65,7 +65,6 @@ #include <rte_ethdev.h> #include <rte_byteorder.h> #include <rte_malloc.h> -#include <rte_fbk_hash.h> #include <rte_string_fns.h> #include "common.h" diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c index 37575c27..8f1f15bb 100644 --- a/examples/tep_termination/vxlan_setup.c +++ b/examples/tep_termination/vxlan_setup.c @@ -249,7 +249,7 @@ vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m) struct rte_eth_tunnel_filter_conf tunnel_filter_conf; - if (unlikely(portid > VXLAN_N_PORTS)) { + if (unlikely(portid >= VXLAN_N_PORTS)) { RTE_LOG(INFO, VHOST_DATA, "(%d) WARNING: Not configuring device," "as already have %d ports for VXLAN.", diff --git a/examples/vhost/main.c b/examples/vhost/main.c index 3aff2cc8..3b98f429 100644 --- a/examples/vhost/main.c +++ b/examples/vhost/main.c @@ -332,8 +332,11 @@ port_init(uint8_t port) rx_rings = (uint16_t)dev_info.max_rx_queues; /* Configure ethernet device. 
*/ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); - if (retval != 0) + if (retval != 0) { + RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n", + port, strerror(-retval)); return retval; + } /* Setup the queues. */ for (q = 0; q < rx_rings; q ++) { @@ -341,21 +344,30 @@ port_init(uint8_t port) rte_eth_dev_socket_id(port), rxconf, mbuf_pool); - if (retval < 0) + if (retval < 0) { + RTE_LOG(ERR, VHOST_PORT, + "Failed to setup rx queue %u of port %u: %s.\n", + q, port, strerror(-retval)); return retval; + } } for (q = 0; q < tx_rings; q ++) { retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, rte_eth_dev_socket_id(port), txconf); - if (retval < 0) + if (retval < 0) { + RTE_LOG(ERR, VHOST_PORT, + "Failed to setup tx queue %u of port %u: %s.\n", + q, port, strerror(-retval)); return retval; + } } /* Start the device. */ retval = rte_eth_dev_start(port); if (retval < 0) { - RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n"); + RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n", + port, strerror(-retval)); return retval; } diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h index 67e26ecb..47c3b9cd 100644 --- a/examples/vm_power_manager/channel_manager.h +++ b/examples/vm_power_manager/channel_manager.h @@ -41,7 +41,13 @@ extern "C" { #include <linux/limits.h> #include <sys/un.h> #include <rte_atomic.h> -#include "channel_commands.h" + +/* Maximum number of CPUs */ +#define CHANNEL_CMDS_MAX_CPUS 64 +#if CHANNEL_CMDS_MAX_CPUS > 64 +#error Maximum number of cores is 64, overflow is guaranteed to \ + cause problems with VM Power Management +#endif /* Maximum name length including '\0' terminator */ #define CHANNEL_MGR_MAX_NAME_LEN 64 diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h index 7768f0ae..affbdecc 100644 --- a/lib/librte_cryptodev/rte_cryptodev.h +++ b/lib/librte_cryptodev/rte_cryptodev.h @@ -49,17 +49,17 @@ extern "C" { #include "rte_crypto.h" #include "rte_dev.h" -#define CRYPTODEV_NAME_NULL_PMD ("cryptodev_null_pmd") +#define CRYPTODEV_NAME_NULL_PMD cryptodev_null_pmd /**< Null crypto PMD device name */ -#define CRYPTODEV_NAME_AESNI_MB_PMD ("cryptodev_aesni_mb_pmd") +#define CRYPTODEV_NAME_AESNI_MB_PMD cryptodev_aesni_mb_pmd /**< AES-NI Multi buffer PMD device name */ -#define CRYPTODEV_NAME_AESNI_GCM_PMD ("cryptodev_aesni_gcm_pmd") +#define CRYPTODEV_NAME_AESNI_GCM_PMD cryptodev_aesni_gcm_pmd /**< AES-NI GCM PMD device name */ -#define CRYPTODEV_NAME_QAT_SYM_PMD ("cryptodev_qat_sym_pmd") +#define CRYPTODEV_NAME_QAT_SYM_PMD cryptodev_qat_sym_pmd /**< Intel QAT Symmetric Crypto PMD device name */ -#define CRYPTODEV_NAME_SNOW3G_PMD ("cryptodev_snow3g_pmd") +#define CRYPTODEV_NAME_SNOW3G_PMD cryptodev_snow3g_pmd /**< SNOW 3G PMD device name */ -#define CRYPTODEV_NAME_KASUMI_PMD ("cryptodev_kasumi_pmd") +#define CRYPTODEV_NAME_KASUMI_PMD cryptodev_kasumi_pmd /**< KASUMI PMD device name */ /** Crypto device type */ @@ -67,9 +67,9 @@ enum rte_cryptodev_type { RTE_CRYPTODEV_NULL_PMD = 1, /**< Null crypto PMD */ RTE_CRYPTODEV_AESNI_GCM_PMD, /**< AES-NI GCM PMD */ RTE_CRYPTODEV_AESNI_MB_PMD, /**< AES-NI multi buffer PMD */ - RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */ RTE_CRYPTODEV_QAT_SYM_PMD, /**< QAT PMD Symmetric Crypto */ RTE_CRYPTODEV_SNOW3G_PMD, /**< SNOW 3G PMD */ + RTE_CRYPTODEV_KASUMI_PMD, /**< KASUMI PMD */ }; extern const char **rte_cyptodev_names; diff --git a/lib/librte_eal/common/eal_common_options.c 
b/lib/librte_eal/common/eal_common_options.c index 3efc90f0..0a594d7f 100644 --- a/lib/librte_eal/common/eal_common_options.c +++ b/lib/librte_eal/common/eal_common_options.c @@ -115,6 +115,15 @@ TAILQ_HEAD_INITIALIZER(solib_list); /* Default path of external loadable drivers */ static const char *default_solib_dir = RTE_EAL_PMD_PATH; +/* + * Stringified version of solib path used by pmdinfo.py + * Note: PLEASE DO NOT ALTER THIS without making a corresponding + * change to tools/pmdinfo.py + */ +static const char dpdk_solib_path[] __attribute__((used)) = +"DPDK_PLUGIN_PATH=" RTE_EAL_PMD_PATH; + + static int master_lcore_parsed; static int mem_parsed; diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h index f1b55079..95789f9d 100644 --- a/lib/librte_eal/common/include/rte_dev.h +++ b/lib/librte_eal/common/include/rte_dev.h @@ -178,12 +178,30 @@ int rte_eal_vdev_init(const char *name, const char *args); */ int rte_eal_vdev_uninit(const char *name); -#define PMD_REGISTER_DRIVER(d)\ -void devinitfn_ ##d(void);\ -void __attribute__((constructor, used)) devinitfn_ ##d(void)\ +#define DRIVER_EXPORT_NAME_ARRAY(n, idx) n##idx[] + +#define DRIVER_EXPORT_NAME(name, idx) \ +static const char DRIVER_EXPORT_NAME_ARRAY(this_pmd_name, idx) \ +__attribute__((used)) = RTE_STR(name) + +#define PMD_REGISTER_DRIVER(drv, nm)\ +void devinitfn_ ##drv(void);\ +void __attribute__((constructor, used)) devinitfn_ ##drv(void)\ {\ - rte_eal_driver_register(&d);\ -} + (drv).name = RTE_STR(nm);\ + rte_eal_driver_register(&drv);\ +} \ +DRIVER_EXPORT_NAME(nm, __COUNTER__) + +#define DRV_EXP_TAG(name, tag) __##name##_##tag + +#define DRIVER_REGISTER_PCI_TABLE(name, table) \ +static const char DRV_EXP_TAG(name, pci_tbl_export)[] __attribute__((used)) = \ +RTE_STR(table) + +#define DRIVER_REGISTER_PARAM_STRING(name, str) \ +static const char DRV_EXP_TAG(name, param_string_export)[] \ +__attribute__((used)) = str #ifdef __cplusplus } diff --git a/lib/librte_eal/common/include/rte_pci_dev_ids.h b/lib/librte_eal/common/include/rte_pci_dev_ids.h index af39fbbd..6ec8ae8c 100644 --- a/lib/librte_eal/common/include/rte_pci_dev_ids.h +++ b/lib/librte_eal/common/include/rte_pci_dev_ids.h @@ -57,50 +57,6 @@ * */ -/** - * @file - * - * This file contains a list of the PCI device IDs recognised by DPDK, which - * can be used to fill out an array of structures describing the devices. - * - * Currently five families of devices are recognised: those supported by the - * IGB driver, by EM driver, those supported by the IXGBE driver, those - * supported by the BNXT driver, and by virtio driver which is a para - * virtualization driver running in guest virtual machine. The inclusion of - * these in an array built using this file depends on the definition of - * RTE_PCI_DEV_ID_DECL_BNXT - * RTE_PCI_DEV_ID_DECL_EM - * RTE_PCI_DEV_ID_DECL_IGB - * RTE_PCI_DEV_ID_DECL_IGBVF - * RTE_PCI_DEV_ID_DECL_IXGBE - * RTE_PCI_DEV_ID_DECL_IXGBEVF - * RTE_PCI_DEV_ID_DECL_I40E - * RTE_PCI_DEV_ID_DECL_I40EVF - * RTE_PCI_DEV_ID_DECL_VIRTIO - * at the time when this file is included. - * - * In order to populate an array, the user of this file must define this macro: - * RTE_PCI_DEV_ID_DECL_IXGBE(vendorID, deviceID). 
For example: - * - * @code - * struct device { - * int vend; - * int dev; - * }; - * - * struct device devices[] = { - * #define RTE_PCI_DEV_ID_DECL_IXGBE(vendorID, deviceID) {vend, dev}, - * #include <rte_pci_dev_ids.h> - * }; - * @endcode - * - * Note that this file can be included multiple times within the same file. - */ - -#ifndef RTE_PCI_DEV_ID_DECL_EM -#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) -#endif - #ifndef RTE_PCI_DEV_ID_DECL_IGB #define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) #endif @@ -117,214 +73,11 @@ #define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) #endif -#ifndef RTE_PCI_DEV_ID_DECL_I40E -#define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_I40EVF -#define RTE_PCI_DEV_ID_DECL_I40EVF(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_VIRTIO -#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_VMXNET3 -#define RTE_PCI_DEV_ID_DECL_VMXNET3(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_FM10K -#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_FM10KVF -#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_ENIC -#define RTE_PCI_DEV_ID_DECL_ENIC(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_BNX2X -#define RTE_PCI_DEV_ID_DECL_BNX2X(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_BNX2XVF -#define RTE_PCI_DEV_ID_DECL_BNX2XVF(vend, dev) -#endif - -#ifndef RTE_PCI_DEV_ID_DECL_BNXT -#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) -#endif - #ifndef PCI_VENDOR_ID_INTEL /** Vendor ID used by Intel devices */ #define PCI_VENDOR_ID_INTEL 0x8086 #endif -#ifndef PCI_VENDOR_ID_QUMRANET -/** Vendor ID used by virtio devices */ -#define PCI_VENDOR_ID_QUMRANET 0x1AF4 -#endif - -#ifndef PCI_VENDOR_ID_VMWARE -/** Vendor ID used by VMware devices */ -#define PCI_VENDOR_ID_VMWARE 0x15AD -#endif - -#ifndef PCI_VENDOR_ID_CISCO -/** Vendor ID used by Cisco VIC devices */ -#define PCI_VENDOR_ID_CISCO 0x1137 -#endif - -#ifndef PCI_VENDOR_ID_BROADCOM -/** Vendor ID used by Broadcom devices */ -#define PCI_VENDOR_ID_BROADCOM 0x14E4 -#endif - -/******************** Physical EM devices from e1000_hw.h ********************/ - -#define E1000_DEV_ID_82542 0x1000 -#define E1000_DEV_ID_82543GC_FIBER 0x1001 -#define E1000_DEV_ID_82543GC_COPPER 0x1004 -#define E1000_DEV_ID_82544EI_COPPER 0x1008 -#define E1000_DEV_ID_82544EI_FIBER 0x1009 -#define E1000_DEV_ID_82544GC_COPPER 0x100C -#define E1000_DEV_ID_82544GC_LOM 0x100D -#define E1000_DEV_ID_82540EM 0x100E -#define E1000_DEV_ID_82540EM_LOM 0x1015 -#define E1000_DEV_ID_82540EP_LOM 0x1016 -#define E1000_DEV_ID_82540EP 0x1017 -#define E1000_DEV_ID_82540EP_LP 0x101E -#define E1000_DEV_ID_82545EM_COPPER 0x100F -#define E1000_DEV_ID_82545EM_FIBER 0x1011 -#define E1000_DEV_ID_82545GM_COPPER 0x1026 -#define E1000_DEV_ID_82545GM_FIBER 0x1027 -#define E1000_DEV_ID_82545GM_SERDES 0x1028 -#define E1000_DEV_ID_82546EB_COPPER 0x1010 -#define E1000_DEV_ID_82546EB_FIBER 0x1012 -#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D -#define E1000_DEV_ID_82546GB_COPPER 0x1079 -#define E1000_DEV_ID_82546GB_FIBER 0x107A -#define E1000_DEV_ID_82546GB_SERDES 0x107B -#define E1000_DEV_ID_82546GB_PCIE 0x108A -#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 -#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 -#define E1000_DEV_ID_82541EI 0x1013 -#define E1000_DEV_ID_82541EI_MOBILE 0x1018 -#define E1000_DEV_ID_82541ER_LOM 0x1014 -#define E1000_DEV_ID_82541ER 0x1078 -#define E1000_DEV_ID_82541GI 0x1076 -#define E1000_DEV_ID_82541GI_LF 0x107C -#define 
E1000_DEV_ID_82541GI_MOBILE 0x1077 -#define E1000_DEV_ID_82547EI 0x1019 -#define E1000_DEV_ID_82547EI_MOBILE 0x101A -#define E1000_DEV_ID_82547GI 0x1075 -#define E1000_DEV_ID_82571EB_COPPER 0x105E -#define E1000_DEV_ID_82571EB_FIBER 0x105F -#define E1000_DEV_ID_82571EB_SERDES 0x1060 -#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 -#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA -#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 -#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 -#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 -#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC -#define E1000_DEV_ID_82572EI_COPPER 0x107D -#define E1000_DEV_ID_82572EI_FIBER 0x107E -#define E1000_DEV_ID_82572EI_SERDES 0x107F -#define E1000_DEV_ID_82572EI 0x10B9 -#define E1000_DEV_ID_82573E 0x108B -#define E1000_DEV_ID_82573E_IAMT 0x108C -#define E1000_DEV_ID_82573L 0x109A -#define E1000_DEV_ID_82574L 0x10D3 -#define E1000_DEV_ID_82574LA 0x10F6 -#define E1000_DEV_ID_82583V 0x150C -#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 -#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 -#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA -#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB -#define E1000_DEV_ID_ICH8_82567V_3 0x1501 -#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 -#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A -#define E1000_DEV_ID_ICH8_IGP_C 0x104B -#define E1000_DEV_ID_ICH8_IFE 0x104C -#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 -#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 -#define E1000_DEV_ID_ICH8_IGP_M 0x104D -#define E1000_DEV_ID_ICH9_IGP_M 0x10BF -#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 -#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB -#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD -#define E1000_DEV_ID_ICH9_BM 0x10E5 -#define E1000_DEV_ID_ICH9_IGP_C 0x294C -#define E1000_DEV_ID_ICH9_IFE 0x10C0 -#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 -#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 -#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC -#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD -#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE -#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE -#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF -#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 - -#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA -#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB -#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF -#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 -#define E1000_DEV_ID_PCH2_LV_LM 0x1502 -#define E1000_DEV_ID_PCH2_LV_V 0x1503 -#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A -#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B -#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A -#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 -#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 -#define E1000_DEV_ID_PCH_I218_V2 0x15A1 -#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 -#define E1000_DEV_ID_PCH_I218_V3 0x15A3 - - -/* - * Tested (supported) on VM emulated HW. - */ - -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82540EM) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82545EM_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82545EM_FIBER) - -/* - * Tested (supported) on real HW. 
- */ - -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82546EB_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82546EB_FIBER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82546EB_QUAD_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_FIBER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_SERDES) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI_COPPER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI_FIBER) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI_SERDES) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82572EI) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82573L) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82574L) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82574LA) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82583V) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_LPT_I217_LM) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_LPT_I217_V) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_I218_LM2) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_I218_V2) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_I218_LM3) -RTE_PCI_DEV_ID_DECL_EM(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_PCH_I218_V3) - - /******************** Physical IGB devices from e1000_hw.h ********************/ #define E1000_DEV_ID_82576 0x10C9 @@ -528,60 +281,6 @@ RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_KR) RTE_PCI_DEV_ID_DECL_IXGBE(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BYPASS) #endif -/*************** Physical I40E devices from i40e_type.h *****************/ - -#define I40E_DEV_ID_SFP_XL710 0x1572 -#define I40E_DEV_ID_QEMU 0x1574 -#define I40E_DEV_ID_KX_B 0x1580 -#define I40E_DEV_ID_KX_C 0x1581 -#define I40E_DEV_ID_QSFP_A 0x1583 -#define I40E_DEV_ID_QSFP_B 0x1584 -#define I40E_DEV_ID_QSFP_C 0x1585 -#define I40E_DEV_ID_10G_BASE_T 0x1586 -#define I40E_DEV_ID_20G_KR2 0x1587 -#define I40E_DEV_ID_20G_KR2_A 0x1588 -#define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_25G_B 0x158A -#define I40E_DEV_ID_25G_SFP28 0x158B -#define I40E_DEV_ID_X722_A0 0x374C -#define I40E_DEV_ID_KX_X722 0x37CE -#define I40E_DEV_ID_QSFP_X722 0x37CF -#define I40E_DEV_ID_SFP_X722 0x37D0 -#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 -#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_QSFP_I_X722 0x37D4 - -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_XL710) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QEMU) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_B) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_C) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, 
I40E_DEV_ID_QSFP_A) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_B) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_C) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_20G_KR2) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_20G_KR2_A) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T4) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_25G_B) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_25G_SFP28) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_A0) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_KX_X722) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_X722) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_X722) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_1G_BASE_T_X722) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_10G_BASE_T_X722) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_SFP_I_X722) -RTE_PCI_DEV_ID_DECL_I40E(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_QSFP_I_X722) - -/*************** Physical FM10K devices from fm10k_type.h ***************/ - -#define FM10K_DEV_ID_PF 0x15A4 -#define FM10K_DEV_ID_SDI_FM10420_QDA2 0x15D0 - -RTE_PCI_DEV_ID_DECL_FM10K(PCI_VENDOR_ID_INTEL, FM10K_DEV_ID_PF) -RTE_PCI_DEV_ID_DECL_FM10K(PCI_VENDOR_ID_INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2) - /****************** Virtual IGB devices from e1000_hw.h ******************/ #define E1000_DEV_ID_82576_VF 0x10CA @@ -618,129 +317,10 @@ RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_A_VF_HV) RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_VF) RTE_PCI_DEV_ID_DECL_IXGBEVF(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV) -/****************** Virtual I40E devices from i40e_type.h ********************/ - -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 -#define I40E_DEV_ID_X722_A0_VF 0x374D -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 - -RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF) -RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_VF_HV) -RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_A0_VF) -RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_VF) -RTE_PCI_DEV_ID_DECL_I40EVF(PCI_VENDOR_ID_INTEL, I40E_DEV_ID_X722_VF_HV) - -/****************** Virtio devices from virtio.h ******************/ - -#define QUMRANET_DEV_ID_VIRTIO 0x1000 - -RTE_PCI_DEV_ID_DECL_VIRTIO(PCI_VENDOR_ID_QUMRANET, QUMRANET_DEV_ID_VIRTIO) - -/****************** VMware VMXNET3 devices ******************/ - -#define VMWARE_DEV_ID_VMXNET3 0x07B0 - -RTE_PCI_DEV_ID_DECL_VMXNET3(PCI_VENDOR_ID_VMWARE, VMWARE_DEV_ID_VMXNET3) - -/*************** Virtual FM10K devices from fm10k_type.h ***************/ - -#define FM10K_DEV_ID_VF 0x15A5 - -RTE_PCI_DEV_ID_DECL_FM10KVF(PCI_VENDOR_ID_INTEL, FM10K_DEV_ID_VF) - -/****************** Cisco VIC devices ******************/ - -#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ -#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */ - -RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) -RTE_PCI_DEV_ID_DECL_ENIC(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) - -/****************** QLogic devices ******************/ - -/* Broadcom/QLogic BNX2X */ -#define BNX2X_DEV_ID_57710 0x164e -#define BNX2X_DEV_ID_57711 0x164f -#define BNX2X_DEV_ID_57711E 0x1650 -#define 
BNX2X_DEV_ID_57712 0x1662 -#define BNX2X_DEV_ID_57712_MF 0x1663 -#define BNX2X_DEV_ID_57712_VF 0x166f -#define BNX2X_DEV_ID_57713 0x1651 -#define BNX2X_DEV_ID_57713E 0x1652 -#define BNX2X_DEV_ID_57800 0x168a -#define BNX2X_DEV_ID_57800_MF 0x16a5 -#define BNX2X_DEV_ID_57800_VF 0x16a9 -#define BNX2X_DEV_ID_57810 0x168e -#define BNX2X_DEV_ID_57810_MF 0x16ae -#define BNX2X_DEV_ID_57810_VF 0x16af -#define BNX2X_DEV_ID_57811 0x163d -#define BNX2X_DEV_ID_57811_MF 0x163e -#define BNX2X_DEV_ID_57811_VF 0x163f - -#define BNX2X_DEV_ID_57840_OBS 0x168d -#define BNX2X_DEV_ID_57840_OBS_MF 0x16ab -#define BNX2X_DEV_ID_57840_4_10 0x16a1 -#define BNX2X_DEV_ID_57840_2_20 0x16a2 -#define BNX2X_DEV_ID_57840_MF 0x16a4 -#define BNX2X_DEV_ID_57840_VF 0x16ad - -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57800) -RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57800_VF) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57711) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57810) -RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57810_VF) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811) -RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811_VF) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_OBS) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_4_10) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_2_20) -RTE_PCI_DEV_ID_DECL_BNX2XVF(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_VF) -#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57810_MF) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57811_MF) -RTE_PCI_DEV_ID_DECL_BNX2X(PCI_VENDOR_ID_BROADCOM, BNX2X_DEV_ID_57840_MF) -#endif - -/****************** Broadcom bnxt devices ******************/ - -#define BROADCOM_DEV_ID_57301 0x16c8 -#define BROADCOM_DEV_ID_57302 0x16c9 -#define BROADCOM_DEV_ID_57304_PF 0x16ca -#define BROADCOM_DEV_ID_57304_VF 0x16cb -#define BROADCOM_DEV_ID_57402 0x16d0 -#define BROADCOM_DEV_ID_57404 0x16d1 -#define BROADCOM_DEV_ID_57406_PF 0x16d2 -#define BROADCOM_DEV_ID_57406_VF 0x16d3 -#define BROADCOM_DEV_ID_57406_MF 0x16d4 -#define BROADCOM_DEV_ID_57314 0x16df - -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_VF) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57402) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57404) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_PF) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_VF) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57406_MF) -RTE_PCI_DEV_ID_DECL_BNXT(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) - /* * Undef all RTE_PCI_DEV_ID_DECL_* here. 
*/ -#undef RTE_PCI_DEV_ID_DECL_BNX2X -#undef RTE_PCI_DEV_ID_DECL_BNX2XVF -#undef RTE_PCI_DEV_ID_DECL_EM #undef RTE_PCI_DEV_ID_DECL_IGB #undef RTE_PCI_DEV_ID_DECL_IGBVF #undef RTE_PCI_DEV_ID_DECL_IXGBE #undef RTE_PCI_DEV_ID_DECL_IXGBEVF -#undef RTE_PCI_DEV_ID_DECL_I40E -#undef RTE_PCI_DEV_ID_DECL_I40EVF -#undef RTE_PCI_DEV_ID_DECL_VIRTIO -#undef RTE_PCI_DEV_ID_DECL_VMXNET3 -#undef RTE_PCI_DEV_ID_DECL_FM10K -#undef RTE_PCI_DEV_ID_DECL_FM10KVF -#undef RTE_PCI_DEV_ID_DECL_BNXT diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h index dbe09975..37102227 100644 --- a/lib/librte_eal/common/include/rte_version.h +++ b/lib/librte_eal/common/include/rte_version.h @@ -77,7 +77,7 @@ extern "C" { * 0-15 = release candidates * 16 = release */ -#define RTE_VER_RELEASE 1 +#define RTE_VER_RELEASE 2 /** * Macro to compute a version number usable for comparisons diff --git a/lib/librte_eal/common/malloc_elem.c b/lib/librte_eal/common/malloc_elem.c index 27e9925d..42568e1d 100644 --- a/lib/librte_eal/common/malloc_elem.c +++ b/lib/librte_eal/common/malloc_elem.c @@ -275,11 +275,14 @@ malloc_elem_free(struct malloc_elem *elem) return -1; rte_spinlock_lock(&(elem->heap->lock)); + size_t sz = elem->size - sizeof(*elem); + uint8_t *ptr = (uint8_t *)&elem[1]; struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size); if (next->state == ELEM_FREE){ /* remove from free list, join to this one */ elem_free_list_remove(next); join_elem(elem, next); + sz += sizeof(*elem); } /* check if previous element is free, if so join with it and return, @@ -288,15 +291,17 @@ malloc_elem_free(struct malloc_elem *elem) if (elem->prev != NULL && elem->prev->state == ELEM_FREE) { elem_free_list_remove(elem->prev); join_elem(elem->prev, elem); - malloc_elem_free_list_insert(elem->prev); - } - /* otherwise add ourselves to the free list */ - else { - malloc_elem_free_list_insert(elem); - elem->pad = 0; + sz += sizeof(*elem); + ptr -= sizeof(*elem); + elem = elem->prev; } + malloc_elem_free_list_insert(elem); + /* decrease heap's count of allocated elements */ elem->heap->alloc_count--; + + memset(ptr, 0, sz); + rte_spinlock_unlock(&(elem->heap->lock)); return 0; diff --git a/lib/librte_eal/common/rte_malloc.c b/lib/librte_eal/common/rte_malloc.c index 47deb007..f4a88352 100644 --- a/lib/librte_eal/common/rte_malloc.c +++ b/lib/librte_eal/common/rte_malloc.c @@ -123,11 +123,7 @@ rte_malloc(const char *type, size_t size, unsigned align) void * rte_zmalloc_socket(const char *type, size_t size, unsigned align, int socket) { - void *ptr = rte_malloc_socket(type, size, align, socket); - - if (ptr != NULL) - memset(ptr, 0, size); - return ptr; + return rte_malloc_socket(type, size, align, socket); } /* diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile index 30b30f33..1a976931 100644 --- a/lib/librte_eal/linuxapp/eal/Makefile +++ b/lib/librte_eal/linuxapp/eal/Makefile @@ -66,10 +66,11 @@ SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_xen_memory.c endif SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_thread.c SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_log.c +SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio.c +SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_vfio_mp_sync.c SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci.c SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci_uio.c SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci_vfio.c -SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_pci_vfio_mp_sync.c SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_debug.c 
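
The rte_malloc.c and malloc_elem.c hunks above work together: malloc_elem_free() now zeroes the freed payload before returning it to the heap, which is why rte_zmalloc_socket() can simply delegate to rte_malloc_socket() without a memset of its own. The snippet below is only a minimal sketch of that zero-on-free pattern, not the DPDK heap code; the names pool_elem, pool_free and pool_zalloc are invented, and the sketch assumes the pool's backing memory starts out zeroed, so a plain allocation always hands back zeroed bytes.

/*
 * Illustrative sketch of a zero-on-free allocator; not DPDK code.
 * Precondition: the memory behind the free list was zero-initialized.
 */
#include <stddef.h>
#include <string.h>

struct pool_elem {
	struct pool_elem *next;	/* free-list link */
	size_t size;		/* payload size */
};

static struct pool_elem *free_list;

static void pool_free(void *payload)
{
	struct pool_elem *e = (struct pool_elem *)payload - 1;

	memset(payload, 0, e->size);	/* scrub while the block is still ours */
	e->next = free_list;		/* then recycle it */
	free_list = e;
}

static void *pool_zalloc(size_t size)
{
	struct pool_elem *e = free_list;

	/* Sketch only: no element splitting, no pool growth. */
	if (e == NULL || e->size < size)
		return NULL;
	free_list = e->next;
	e->next = NULL;
	return e + 1;			/* already zeroed by pool_free() */
}
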
SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_lcore.c SRCS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += eal_timer.c @@ -110,7 +111,7 @@ CFLAGS_eal_common_cpuflags.o := $(CPUFLAGS_LIST) CFLAGS_eal.o := -D_GNU_SOURCE CFLAGS_eal_interrupts.o := -D_GNU_SOURCE -CFLAGS_eal_pci_vfio_mp_sync.o := -D_GNU_SOURCE +CFLAGS_eal_vfio_mp_sync.o := -D_GNU_SOURCE CFLAGS_eal_timer.o := -D_GNU_SOURCE CFLAGS_eal_lcore.o := -D_GNU_SOURCE CFLAGS_eal_thread.o := -D_GNU_SOURCE diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c index 543ef869..3fb2188f 100644 --- a/lib/librte_eal/linuxapp/eal/eal.c +++ b/lib/librte_eal/linuxapp/eal/eal.c @@ -82,6 +82,7 @@ #include "eal_filesystem.h" #include "eal_hugepages.h" #include "eal_options.h" +#include "eal_vfio.h" #define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL) @@ -701,6 +702,33 @@ rte_eal_iopl_init(void) return 0; } +#ifdef VFIO_PRESENT +static int rte_eal_vfio_setup(void) +{ + int vfio_enabled = 0; + + if (!internal_config.no_pci) { + pci_vfio_enable(); + vfio_enabled |= pci_vfio_is_enabled(); + } + + if (vfio_enabled) { + + /* if we are primary process, create a thread to communicate with + * secondary processes. the thread will use a socket to wait for + * requests from secondary process to send open file descriptors, + * because VFIO does not allow multiple open descriptors on a group or + * VFIO container. + */ + if (internal_config.process_type == RTE_PROC_PRIMARY && + vfio_mp_sync_setup() < 0) + return -1; + } + + return 0; +} +#endif + /* Launch threads, called at application init(). */ int rte_eal_init(int argc, char **argv) @@ -764,6 +792,11 @@ rte_eal_init(int argc, char **argv) if (rte_eal_pci_init() < 0) rte_panic("Cannot init PCI\n"); +#ifdef VFIO_PRESENT + if (rte_eal_vfio_setup() < 0) + rte_panic("Cannot init VFIO\n"); +#endif + #ifdef RTE_LIBRTE_IVSHMEM if (rte_eal_ivshmem_init() < 0) rte_panic("Cannot init IVSHMEM\n"); diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c index 5578c254..42a29faf 100644 --- a/lib/librte_eal/linuxapp/eal/eal_memory.c +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c @@ -164,6 +164,29 @@ rte_mem_virt2phy(const void *virtaddr) int page_size; off_t offset; + /* when using dom0, /proc/self/pagemap always returns 0, check in + * dpdk memory by browsing the memsegs */ + if (rte_xen_dom0_supported()) { + struct rte_mem_config *mcfg; + struct rte_memseg *memseg; + unsigned i; + + mcfg = rte_eal_get_configuration()->mem_config; + for (i = 0; i < RTE_MAX_MEMSEG; i++) { + memseg = &mcfg->memseg[i]; + if (memseg->addr == NULL) + break; + if (virtaddr > memseg->addr && + virtaddr < RTE_PTR_ADD(memseg->addr, + memseg->len)) { + return memseg->phys_addr + + RTE_PTR_DIFF(virtaddr, memseg->addr); + } + } + + return RTE_BAD_PHYS_ADDR; + } + /* Cannot parse /proc/self/pagemap, no need to log errors everywhere */ if (!proc_pagemap_readable) return RTE_BAD_PHYS_ADDR; @@ -1136,7 +1159,7 @@ int rte_eal_hugepage_init(void) { struct rte_mem_config *mcfg; - struct hugepage_file *hugepage, *tmp_hp = NULL; + struct hugepage_file *hugepage = NULL, *tmp_hp = NULL; struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES]; uint64_t memory[RTE_MAX_NUMA_NODES]; @@ -1479,14 +1502,19 @@ rte_eal_hugepage_init(void) "of memory.\n", i, nr_hugefiles, RTE_STR(CONFIG_RTE_MAX_MEMSEG), RTE_MAX_MEMSEG); - return -ENOMEM; + goto fail; } + munmap(hugepage, nr_hugefiles * sizeof(struct hugepage_file)); + return 0; fail: huge_recover_sigbus(); free(tmp_hp); + if (hugepage != NULL) + munmap(hugepage, 
nr_hugefiles * sizeof(struct hugepage_file)); + return -1; } diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c index f9c3efd2..cd9de7cc 100644 --- a/lib/librte_eal/linuxapp/eal/eal_pci.c +++ b/lib/librte_eal/linuxapp/eal/eal_pci.c @@ -754,21 +754,6 @@ rte_eal_pci_init(void) RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__); return -1; } -#ifdef VFIO_PRESENT - pci_vfio_enable(); - - if (pci_vfio_is_enabled()) { - - /* if we are primary process, create a thread to communicate with - * secondary processes. the thread will use a socket to wait for - * requests from secondary process to send open file descriptors, - * because VFIO does not allow multiple open descriptors on a group or - * VFIO container. - */ - if (internal_config.process_type == RTE_PROC_PRIMARY && - pci_vfio_mp_sync_setup() < 0) - return -1; - } -#endif + return 0; } diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_init.h b/lib/librte_eal/linuxapp/eal/eal_pci_init.h index f72a2548..6a960d1b 100644 --- a/lib/librte_eal/linuxapp/eal/eal_pci_init.h +++ b/lib/librte_eal/linuxapp/eal/eal_pci_init.h @@ -74,12 +74,6 @@ int pci_uio_ioport_unmap(struct rte_pci_ioport *p); #ifdef VFIO_PRESENT -#define VFIO_MAX_GROUPS 64 - -int pci_vfio_enable(void); -int pci_vfio_is_enabled(void); -int pci_vfio_mp_sync_setup(void); - /* access config space */ int pci_vfio_read_config(const struct rte_intr_handle *intr_handle, void *buf, size_t len, off_t offs); @@ -96,41 +90,6 @@ int pci_vfio_ioport_unmap(struct rte_pci_ioport *p); /* map VFIO resource prototype */ int pci_vfio_map_resource(struct rte_pci_device *dev); -int pci_vfio_get_group_fd(int iommu_group_fd); -int pci_vfio_get_container_fd(void); - -/* - * Function prototypes for VFIO multiprocess sync functions - */ -int vfio_mp_sync_send_request(int socket, int req); -int vfio_mp_sync_receive_request(int socket); -int vfio_mp_sync_send_fd(int socket, int fd); -int vfio_mp_sync_receive_fd(int socket); -int vfio_mp_sync_connect_to_primary(void); - -/* socket comm protocol definitions */ -#define SOCKET_REQ_CONTAINER 0x100 -#define SOCKET_REQ_GROUP 0x200 -#define SOCKET_OK 0x0 -#define SOCKET_NO_FD 0x1 -#define SOCKET_ERR 0xFF - -/* - * we don't need to store device fd's anywhere since they can be obtained from - * the group fd via an ioctl() call. - */ -struct vfio_group { - int group_no; - int fd; -}; - -struct vfio_config { - int vfio_enabled; - int vfio_container_fd; - int vfio_container_has_dma; - int vfio_group_idx; - struct vfio_group vfio_groups[VFIO_MAX_GROUPS]; -}; #endif diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c index f91b9242..46cd6831 100644 --- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c +++ b/lib/librte_eal/linuxapp/eal/eal_pci_vfio.c @@ -43,11 +43,11 @@ #include <rte_pci.h> #include <rte_eal_memconfig.h> #include <rte_malloc.h> -#include <eal_private.h> #include "eal_filesystem.h" #include "eal_pci_init.h" #include "eal_vfio.h" +#include "eal_private.h" /** * @file @@ -69,78 +69,6 @@ static struct rte_tailq_elem rte_vfio_tailq = { }; EAL_REGISTER_TAILQ(rte_vfio_tailq) -#define VFIO_DIR "/dev/vfio" -#define VFIO_CONTAINER_PATH "/dev/vfio/vfio" -#define VFIO_GROUP_FMT "/dev/vfio/%u" -#define VFIO_NOIOMMU_GROUP_FMT "/dev/vfio/noiommu-%u" -#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL) -#define VFIO_GET_REGION_IDX(x) (x >> 40) - -/* per-process VFIO config */ -static struct vfio_config vfio_cfg; - -/* DMA mapping function prototype. 
- * Takes VFIO container fd as a parameter. - * Returns 0 on success, -1 on error. - * */ -typedef int (*vfio_dma_func_t)(int); - -struct vfio_iommu_type { - int type_id; - const char *name; - vfio_dma_func_t dma_map_func; -}; - -static int vfio_type1_dma_map(int); -static int vfio_noiommu_dma_map(int); - -/* IOMMU types we support */ -static const struct vfio_iommu_type iommu_types[] = { - /* x86 IOMMU, otherwise known as type 1 */ - { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map}, - /* IOMMU-less mode */ - { RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map}, -}; - -int -vfio_type1_dma_map(int vfio_container_fd) -{ - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - int i, ret; - - /* map all DPDK segments for DMA. use 1:1 PA to IOVA mapping */ - for (i = 0; i < RTE_MAX_MEMSEG; i++) { - struct vfio_iommu_type1_dma_map dma_map; - - if (ms[i].addr == NULL) - break; - - memset(&dma_map, 0, sizeof(dma_map)); - dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map); - dma_map.vaddr = ms[i].addr_64; - dma_map.size = ms[i].len; - dma_map.iova = ms[i].phys_addr; - dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; - - ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map); - - if (ret) { - RTE_LOG(ERR, EAL, " cannot set up DMA remapping, " - "error %i (%s)\n", errno, strerror(errno)); - return -1; - } - } - - return 0; -} - -int -vfio_noiommu_dma_map(int __rte_unused vfio_container_fd) -{ - /* No-IOMMU mode does not need DMA mapping */ - return 0; -} - int pci_vfio_read_config(const struct rte_intr_handle *intr_handle, void *buf, size_t len, off_t offs) @@ -272,63 +200,6 @@ pci_vfio_set_bus_master(int dev_fd) return 0; } -/* pick IOMMU type. returns a pointer to vfio_iommu_type or NULL for error */ -static const struct vfio_iommu_type * -pci_vfio_set_iommu_type(int vfio_container_fd) { - unsigned idx; - for (idx = 0; idx < RTE_DIM(iommu_types); idx++) { - const struct vfio_iommu_type *t = &iommu_types[idx]; - - int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, - t->type_id); - if (!ret) { - RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n", - t->type_id, t->name); - return t; - } - /* not an error, there may be more supported IOMMU types */ - RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, " - "error %i (%s)\n", t->type_id, t->name, errno, - strerror(errno)); - } - /* if we didn't find a suitable IOMMU type, fail */ - return NULL; -} - -/* check if we have any supported extensions */ -static int -pci_vfio_has_supported_extensions(int vfio_container_fd) { - int ret; - unsigned idx, n_extensions = 0; - for (idx = 0; idx < RTE_DIM(iommu_types); idx++) { - const struct vfio_iommu_type *t = &iommu_types[idx]; - - ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, - t->type_id); - if (ret < 0) { - RTE_LOG(ERR, EAL, " could not get IOMMU type, " - "error %i (%s)\n", errno, - strerror(errno)); - close(vfio_container_fd); - return -1; - } else if (ret == 1) { - /* we found a supported extension */ - n_extensions++; - } - RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n", - t->type_id, t->name, - ret ? 
"supported" : "not supported"); - } - - /* if we didn't find any supported IOMMU types, fail */ - if (!n_extensions) { - close(vfio_container_fd); - return -1; - } - - return 0; -} - /* set up interrupt support (but not enable interrupts) */ static int pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd) @@ -425,220 +296,6 @@ pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd) return -1; } -/* open container fd or get an existing one */ -int -pci_vfio_get_container_fd(void) -{ - int ret, vfio_container_fd; - - /* if we're in a primary process, try to open the container */ - if (internal_config.process_type == RTE_PROC_PRIMARY) { - vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR); - if (vfio_container_fd < 0) { - RTE_LOG(ERR, EAL, " cannot open VFIO container, " - "error %i (%s)\n", errno, strerror(errno)); - return -1; - } - - /* check VFIO API version */ - ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION); - if (ret != VFIO_API_VERSION) { - if (ret < 0) - RTE_LOG(ERR, EAL, " could not get VFIO API version, " - "error %i (%s)\n", errno, strerror(errno)); - else - RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n"); - close(vfio_container_fd); - return -1; - } - - ret = pci_vfio_has_supported_extensions(vfio_container_fd); - if (ret) { - RTE_LOG(ERR, EAL, " no supported IOMMU " - "extensions found!\n"); - return -1; - } - - return vfio_container_fd; - } else { - /* - * if we're in a secondary process, request container fd from the - * primary process via our socket - */ - int socket_fd; - - socket_fd = vfio_mp_sync_connect_to_primary(); - if (socket_fd < 0) { - RTE_LOG(ERR, EAL, " cannot connect to primary process!\n"); - return -1; - } - if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) { - RTE_LOG(ERR, EAL, " cannot request container fd!\n"); - close(socket_fd); - return -1; - } - vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd); - if (vfio_container_fd < 0) { - RTE_LOG(ERR, EAL, " cannot get container fd!\n"); - close(socket_fd); - return -1; - } - close(socket_fd); - return vfio_container_fd; - } - - return -1; -} - -/* open group fd or get an existing one */ -int -pci_vfio_get_group_fd(int iommu_group_no) -{ - int i; - int vfio_group_fd; - char filename[PATH_MAX]; - - /* check if we already have the group descriptor open */ - for (i = 0; i < vfio_cfg.vfio_group_idx; i++) - if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no) - return vfio_cfg.vfio_groups[i].fd; - - /* if primary, try to open the group */ - if (internal_config.process_type == RTE_PROC_PRIMARY) { - /* try regular group format */ - snprintf(filename, sizeof(filename), - VFIO_GROUP_FMT, iommu_group_no); - vfio_group_fd = open(filename, O_RDWR); - if (vfio_group_fd < 0) { - /* if file not found, it's not an error */ - if (errno != ENOENT) { - RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename, - strerror(errno)); - return -1; - } - - /* special case: try no-IOMMU path as well */ - snprintf(filename, sizeof(filename), - VFIO_NOIOMMU_GROUP_FMT, iommu_group_no); - vfio_group_fd = open(filename, O_RDWR); - if (vfio_group_fd < 0) { - if (errno != ENOENT) { - RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename, - strerror(errno)); - return -1; - } - return 0; - } - /* noiommu group found */ - } - - /* if the fd is valid, create a new group for it */ - if (vfio_cfg.vfio_group_idx == VFIO_MAX_GROUPS) { - RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n"); - close(vfio_group_fd); - return -1; - } - 
vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no; - vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd; - return vfio_group_fd; - } - /* if we're in a secondary process, request group fd from the primary - * process via our socket - */ - else { - int socket_fd, ret; - - socket_fd = vfio_mp_sync_connect_to_primary(); - - if (socket_fd < 0) { - RTE_LOG(ERR, EAL, " cannot connect to primary process!\n"); - return -1; - } - if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) { - RTE_LOG(ERR, EAL, " cannot request container fd!\n"); - close(socket_fd); - return -1; - } - if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) { - RTE_LOG(ERR, EAL, " cannot send group number!\n"); - close(socket_fd); - return -1; - } - ret = vfio_mp_sync_receive_request(socket_fd); - switch (ret) { - case SOCKET_NO_FD: - close(socket_fd); - return 0; - case SOCKET_OK: - vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd); - /* if we got the fd, return it */ - if (vfio_group_fd > 0) { - close(socket_fd); - return vfio_group_fd; - } - /* fall-through on error */ - default: - RTE_LOG(ERR, EAL, " cannot get container fd!\n"); - close(socket_fd); - return -1; - } - } - return -1; -} - -/* parse IOMMU group number for a PCI device - * returns 1 on success, -1 for errors, 0 for non-existent group - */ -static int -pci_vfio_get_group_no(const char *pci_addr, int *iommu_group_no) -{ - char linkname[PATH_MAX]; - char filename[PATH_MAX]; - char *tok[16], *group_tok, *end; - int ret; - - memset(linkname, 0, sizeof(linkname)); - memset(filename, 0, sizeof(filename)); - - /* try to find out IOMMU group for this device */ - snprintf(linkname, sizeof(linkname), - "%s/%s/iommu_group", pci_get_sysfs_path(), pci_addr); - - ret = readlink(linkname, filename, sizeof(filename)); - - /* if the link doesn't exist, no VFIO for us */ - if (ret < 0) - return 0; - - ret = rte_strsplit(filename, sizeof(filename), - tok, RTE_DIM(tok), '/'); - - if (ret <= 0) { - RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", pci_addr); - return -1; - } - - /* IOMMU group is always the last token */ - errno = 0; - group_tok = tok[ret - 1]; - end = group_tok; - *iommu_group_no = strtol(group_tok, &end, 10); - if ((end != group_tok && *end != '\0') || errno != 0) { - RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", pci_addr); - return -1; - } - - return 1; -} - -static void -clear_current_group(void) -{ - vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = 0; - vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = -1; -} - - /* * map the PCI resources of a PCI device in virtual memory (VFIO version). 
* primary and secondary processes follow almost exactly the same path @@ -646,13 +303,9 @@ clear_current_group(void) int pci_vfio_map_resource(struct rte_pci_device *dev) { - struct vfio_group_status group_status = { - .argsz = sizeof(group_status) - }; struct vfio_device_info device_info = { .argsz = sizeof(device_info) }; - int vfio_group_fd, vfio_dev_fd; - int iommu_group_no; char pci_addr[PATH_MAX] = {0}; + int vfio_dev_fd; struct rte_pci_addr *loc = &dev->addr; int i, ret, msix_bar; struct mapped_pci_resource *vfio_res = NULL; @@ -670,127 +323,9 @@ pci_vfio_map_resource(struct rte_pci_device *dev) snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT, loc->domain, loc->bus, loc->devid, loc->function); - /* get group number */ - ret = pci_vfio_get_group_no(pci_addr, &iommu_group_no); - if (ret == 0) { - RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", - pci_addr); - return 1; - } - - /* if negative, something failed */ - if (ret < 0) - return -1; - - /* get the actual group fd */ - vfio_group_fd = pci_vfio_get_group_fd(iommu_group_no); - if (vfio_group_fd < 0) - return -1; - - /* store group fd */ - vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no; - vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd; - - /* if group_fd == 0, that means the device isn't managed by VFIO */ - if (vfio_group_fd == 0) { - RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", - pci_addr); - /* we store 0 as group fd to distinguish between existing but - * unbound VFIO groups, and groups that don't exist at all. - */ - vfio_cfg.vfio_group_idx++; - return 1; - } - - /* - * at this point, we know at least one port on this device is bound to VFIO, - * so we can proceed to try and set this particular port up - */ - - /* check if the group is viable */ - ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status); - if (ret) { - RTE_LOG(ERR, EAL, " %s cannot get group status, " - "error %i (%s)\n", pci_addr, errno, strerror(errno)); - close(vfio_group_fd); - clear_current_group(); - return -1; - } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) { - RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", pci_addr); - close(vfio_group_fd); - clear_current_group(); - return -1; - } - - /* - * at this point, we know that this group is viable (meaning, all devices - * are either bound to VFIO or not bound to anything) - */ - - /* check if group does not have a container yet */ - if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) { - - /* add group to a container */ - ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER, - &vfio_cfg.vfio_container_fd); - if (ret) { - RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, " - "error %i (%s)\n", pci_addr, errno, strerror(errno)); - close(vfio_group_fd); - clear_current_group(); - return -1; - } - /* - * at this point we know that this group has been successfully - * initialized, so we increment vfio_group_idx to indicate that we can - * add new groups. 
- */ - vfio_cfg.vfio_group_idx++; - } - - /* - * pick an IOMMU type and set up DMA mappings for container - * - * needs to be done only once, only when at least one group is assigned to - * a container and only in primary process - */ - if (internal_config.process_type == RTE_PROC_PRIMARY && - vfio_cfg.vfio_container_has_dma == 0) { - /* select an IOMMU type which we will be using */ - const struct vfio_iommu_type *t = - pci_vfio_set_iommu_type(vfio_cfg.vfio_container_fd); - if (!t) { - RTE_LOG(ERR, EAL, " %s failed to select IOMMU type\n", pci_addr); - return -1; - } - ret = t->dma_map_func(vfio_cfg.vfio_container_fd); - if (ret) { - RTE_LOG(ERR, EAL, " %s DMA remapping failed, " - "error %i (%s)\n", pci_addr, errno, strerror(errno)); - return -1; - } - vfio_cfg.vfio_container_has_dma = 1; - } - - /* get a file descriptor for the device */ - vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, pci_addr); - if (vfio_dev_fd < 0) { - /* if we cannot get a device fd, this simply means that this - * particular port is not bound to VFIO - */ - RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", - pci_addr); - return 1; - } - - /* test and setup the device */ - ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_INFO, &device_info); - if (ret) { - RTE_LOG(ERR, EAL, " %s cannot get device info, " - "error %i (%s)\n", pci_addr, errno, strerror(errno)); - close(vfio_dev_fd); - return -1; - } + if ((ret = vfio_setup_device(pci_get_sysfs_path(), pci_addr, + &vfio_dev_fd, &device_info))) + return ret; /* get MSI-X BAR, if any (we have to know where it is because we can't * easily mmap it when using VFIO) */ @@ -1048,50 +583,12 @@ pci_vfio_ioport_unmap(struct rte_pci_ioport *p) int pci_vfio_enable(void) { - /* initialize group list */ - int i; - int vfio_available; - - for (i = 0; i < VFIO_MAX_GROUPS; i++) { - vfio_cfg.vfio_groups[i].fd = -1; - vfio_cfg.vfio_groups[i].group_no = -1; - } - - /* inform the user that we are probing for VFIO */ - RTE_LOG(INFO, EAL, "Probing VFIO support...\n"); - - /* check if vfio-pci module is loaded */ - vfio_available = rte_eal_check_module("vfio_pci"); - - /* return error directly */ - if (vfio_available == -1) { - RTE_LOG(INFO, EAL, "Could not get loaded module details!\n"); - return -1; - } - - /* return 0 if VFIO modules not loaded */ - if (vfio_available == 0) { - RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, " - "skipping VFIO support...\n"); - return 0; - } - - vfio_cfg.vfio_container_fd = pci_vfio_get_container_fd(); - - /* check if we have VFIO driver enabled */ - if (vfio_cfg.vfio_container_fd != -1) { - RTE_LOG(NOTICE, EAL, "VFIO support initialized\n"); - vfio_cfg.vfio_enabled = 1; - } else { - RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n"); - } - - return 0; + return vfio_enable("vfio_pci"); } int pci_vfio_is_enabled(void) { - return vfio_cfg.vfio_enabled; + return vfio_is_enabled("vfio_pci"); } #endif diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.c b/lib/librte_eal/linuxapp/eal/eal_vfio.c new file mode 100644 index 00000000..fcb0ab38 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_vfio.c @@ -0,0 +1,547 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <string.h> +#include <fcntl.h> +#include <unistd.h> +#include <sys/ioctl.h> + +#include <rte_log.h> +#include <rte_memory.h> +#include <rte_eal_memconfig.h> + +#include "eal_filesystem.h" +#include "eal_vfio.h" +#include "eal_private.h" + +#ifdef VFIO_PRESENT + +/* per-process VFIO config */ +static struct vfio_config vfio_cfg; + +static int vfio_type1_dma_map(int); +static int vfio_noiommu_dma_map(int); + +/* IOMMU types we support */ +static const struct vfio_iommu_type iommu_types[] = { + /* x86 IOMMU, otherwise known as type 1 */ + { RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map}, + /* IOMMU-less mode */ + { RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map}, +}; + +int +vfio_get_group_fd(int iommu_group_no) +{ + int i; + int vfio_group_fd; + char filename[PATH_MAX]; + + /* check if we already have the group descriptor open */ + for (i = 0; i < vfio_cfg.vfio_group_idx; i++) + if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no) + return vfio_cfg.vfio_groups[i].fd; + + /* if primary, try to open the group */ + if (internal_config.process_type == RTE_PROC_PRIMARY) { + /* try regular group format */ + snprintf(filename, sizeof(filename), + VFIO_GROUP_FMT, iommu_group_no); + vfio_group_fd = open(filename, O_RDWR); + if (vfio_group_fd < 0) { + /* if file not found, it's not an error */ + if (errno != ENOENT) { + RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename, + strerror(errno)); + return -1; + } + + /* special case: try no-IOMMU path as well */ + snprintf(filename, sizeof(filename), + VFIO_NOIOMMU_GROUP_FMT, iommu_group_no); + vfio_group_fd = open(filename, O_RDWR); + if (vfio_group_fd < 0) { + if (errno != ENOENT) { + RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", filename, + strerror(errno)); + return -1; + } + return 0; + } + /* noiommu group found */ + } + + /* if the fd is valid, create a new group for it */ + if (vfio_cfg.vfio_group_idx == VFIO_MAX_GROUPS) { + RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n"); + close(vfio_group_fd); + return -1; + } + vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no; + vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd; + return vfio_group_fd; + } + /* if we're in a secondary process, request group fd from the primary + * process via our socket + */ + else { + int socket_fd, ret; + 
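
The secondary-process branch that follows cannot open the VFIO group file itself, because, as the comments above note, VFIO does not allow multiple open descriptors on a group or container; instead it requests the descriptor from the primary process over a local socket through the vfio_mp_sync_* helpers. For reference only, here is a generic POSIX sketch of passing one file descriptor across a UNIX-domain socket with SCM_RIGHTS; it is not the DPDK helper implementation, and send_fd() is a hypothetical name.

/*
 * Generic SCM_RIGHTS fd-passing sketch; not the DPDK vfio_mp_sync code.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(int sock, int fd)
{
	char dummy = 0;
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct msghdr msg;
	struct cmsghdr *cmsg;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;	/* carry the fd as ancillary data */
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));

	/* At least one data byte accompanies the ancillary payload. */
	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}

The receiving side performs the converse recvmsg() call; in the code above that role is played by vfio_mp_sync_receive_fd().
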
+ socket_fd = vfio_mp_sync_connect_to_primary(); + + if (socket_fd < 0) { + RTE_LOG(ERR, EAL, " cannot connect to primary process!\n"); + return -1; + } + if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) { + RTE_LOG(ERR, EAL, " cannot request container fd!\n"); + close(socket_fd); + return -1; + } + if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) { + RTE_LOG(ERR, EAL, " cannot send group number!\n"); + close(socket_fd); + return -1; + } + ret = vfio_mp_sync_receive_request(socket_fd); + switch (ret) { + case SOCKET_NO_FD: + close(socket_fd); + return 0; + case SOCKET_OK: + vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd); + /* if we got the fd, return it */ + if (vfio_group_fd > 0) { + close(socket_fd); + return vfio_group_fd; + } + /* fall-through on error */ + default: + RTE_LOG(ERR, EAL, " cannot get container fd!\n"); + close(socket_fd); + return -1; + } + } + return -1; +} + +static void +clear_current_group(void) +{ + vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = 0; + vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = -1; +} + +int vfio_setup_device(const char *sysfs_base, const char *dev_addr, + int *vfio_dev_fd, struct vfio_device_info *device_info) +{ + struct vfio_group_status group_status = { + .argsz = sizeof(group_status) + }; + int vfio_group_fd; + int iommu_group_no; + int ret; + + /* get group number */ + ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no); + if (ret == 0) { + RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", + dev_addr); + return 1; + } + + /* if negative, something failed */ + if (ret < 0) + return -1; + + /* get the actual group fd */ + vfio_group_fd = vfio_get_group_fd(iommu_group_no); + if (vfio_group_fd < 0) + return -1; + + /* store group fd */ + vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].group_no = iommu_group_no; + vfio_cfg.vfio_groups[vfio_cfg.vfio_group_idx].fd = vfio_group_fd; + + /* if group_fd == 0, that means the device isn't managed by VFIO */ + if (vfio_group_fd == 0) { + RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", + dev_addr); + /* we store 0 as group fd to distinguish between existing but + * unbound VFIO groups, and groups that don't exist at all. + */ + vfio_cfg.vfio_group_idx++; + return 1; + } + + /* + * at this point, we know that this group is viable (meaning, all devices + * are either bound to VFIO or not bound to anything) + */ + + /* check if the group is viable */ + ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status); + if (ret) { + RTE_LOG(ERR, EAL, " %s cannot get group status, " + "error %i (%s)\n", dev_addr, errno, strerror(errno)); + close(vfio_group_fd); + clear_current_group(); + return -1; + } else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) { + RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr); + close(vfio_group_fd); + clear_current_group(); + return -1; + } + + /* check if group does not have a container yet */ + if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) { + + /* add group to a container */ + ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER, + &vfio_cfg.vfio_container_fd); + if (ret) { + RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, " + "error %i (%s)\n", dev_addr, errno, strerror(errno)); + close(vfio_group_fd); + clear_current_group(); + return -1; + } + /* + * at this point we know that this group has been successfully + * initialized, so we increment vfio_group_idx to indicate that we can + * add new groups. 
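 * (The error paths above intentionally do not advance vfio_group_idx;
 * they call clear_current_group() instead, so the same slot is reused
 * on the next attempt.)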
+ */ + vfio_cfg.vfio_group_idx++; + } + + /* + * pick an IOMMU type and set up DMA mappings for container + * + * needs to be done only once, only when at least one group is assigned to + * a container and only in primary process + */ + if (internal_config.process_type == RTE_PROC_PRIMARY && + vfio_cfg.vfio_container_has_dma == 0) { + /* select an IOMMU type which we will be using */ + const struct vfio_iommu_type *t = + vfio_set_iommu_type(vfio_cfg.vfio_container_fd); + if (!t) { + RTE_LOG(ERR, EAL, " %s failed to select IOMMU type\n", dev_addr); + return -1; + } + ret = t->dma_map_func(vfio_cfg.vfio_container_fd); + if (ret) { + RTE_LOG(ERR, EAL, " %s DMA remapping failed, " + "error %i (%s)\n", dev_addr, errno, strerror(errno)); + return -1; + } + vfio_cfg.vfio_container_has_dma = 1; + } + + /* get a file descriptor for the device */ + *vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr); + if (*vfio_dev_fd < 0) { + /* if we cannot get a device fd, this simply means that this + * particular port is not bound to VFIO + */ + RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver, skipping\n", + dev_addr); + return 1; + } + + /* test and setup the device */ + ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info); + if (ret) { + RTE_LOG(ERR, EAL, " %s cannot get device info, " + "error %i (%s)\n", dev_addr, errno, strerror(errno)); + close(*vfio_dev_fd); + return -1; + } + + return 0; +} + +int +vfio_enable(const char *modname) +{ + /* initialize group list */ + int i; + int vfio_available; + + for (i = 0; i < VFIO_MAX_GROUPS; i++) { + vfio_cfg.vfio_groups[i].fd = -1; + vfio_cfg.vfio_groups[i].group_no = -1; + } + + /* inform the user that we are probing for VFIO */ + RTE_LOG(INFO, EAL, "Probing VFIO support...\n"); + + /* check if vfio-pci module is loaded */ + vfio_available = rte_eal_check_module(modname); + + /* return error directly */ + if (vfio_available == -1) { + RTE_LOG(INFO, EAL, "Could not get loaded module details!\n"); + return -1; + } + + /* return 0 if VFIO modules not loaded */ + if (vfio_available == 0) { + RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, " + "skipping VFIO support...\n"); + return 0; + } + + vfio_cfg.vfio_container_fd = vfio_get_container_fd(); + + /* check if we have VFIO driver enabled */ + if (vfio_cfg.vfio_container_fd != -1) { + RTE_LOG(NOTICE, EAL, "VFIO support initialized\n"); + vfio_cfg.vfio_enabled = 1; + } else { + RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n"); + } + + return 0; +} + +int +vfio_is_enabled(const char *modname) +{ + const int mod_available = rte_eal_check_module(modname); + return vfio_cfg.vfio_enabled && mod_available; +} + +const struct vfio_iommu_type * +vfio_set_iommu_type(int vfio_container_fd) { + unsigned idx; + for (idx = 0; idx < RTE_DIM(iommu_types); idx++) { + const struct vfio_iommu_type *t = &iommu_types[idx]; + + int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU, + t->type_id); + if (!ret) { + RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n", + t->type_id, t->name); + return t; + } + /* not an error, there may be more supported IOMMU types */ + RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, " + "error %i (%s)\n", t->type_id, t->name, errno, + strerror(errno)); + } + /* if we didn't find a suitable IOMMU type, fail */ + return NULL; +} + +int +vfio_has_supported_extensions(int vfio_container_fd) { + int ret; + unsigned idx, n_extensions = 0; + for (idx = 0; idx < RTE_DIM(iommu_types); idx++) { + const struct vfio_iommu_type *t = &iommu_types[idx]; + + ret = 
ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION, + t->type_id); + if (ret < 0) { + RTE_LOG(ERR, EAL, " could not get IOMMU type, " + "error %i (%s)\n", errno, + strerror(errno)); + close(vfio_container_fd); + return -1; + } else if (ret == 1) { + /* we found a supported extension */ + n_extensions++; + } + RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n", + t->type_id, t->name, + ret ? "supported" : "not supported"); + } + + /* if we didn't find any supported IOMMU types, fail */ + if (!n_extensions) { + close(vfio_container_fd); + return -1; + } + + return 0; +} + +int +vfio_get_container_fd(void) +{ + int ret, vfio_container_fd; + + /* if we're in a primary process, try to open the container */ + if (internal_config.process_type == RTE_PROC_PRIMARY) { + vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR); + if (vfio_container_fd < 0) { + RTE_LOG(ERR, EAL, " cannot open VFIO container, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + + /* check VFIO API version */ + ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION); + if (ret != VFIO_API_VERSION) { + if (ret < 0) + RTE_LOG(ERR, EAL, " could not get VFIO API version, " + "error %i (%s)\n", errno, strerror(errno)); + else + RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n"); + close(vfio_container_fd); + return -1; + } + + ret = vfio_has_supported_extensions(vfio_container_fd); + if (ret) { + RTE_LOG(ERR, EAL, " no supported IOMMU " + "extensions found!\n"); + return -1; + } + + return vfio_container_fd; + } else { + /* + * if we're in a secondary process, request container fd from the + * primary process via our socket + */ + int socket_fd; + + socket_fd = vfio_mp_sync_connect_to_primary(); + if (socket_fd < 0) { + RTE_LOG(ERR, EAL, " cannot connect to primary process!\n"); + return -1; + } + if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) { + RTE_LOG(ERR, EAL, " cannot request container fd!\n"); + close(socket_fd); + return -1; + } + vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd); + if (vfio_container_fd < 0) { + RTE_LOG(ERR, EAL, " cannot get container fd!\n"); + close(socket_fd); + return -1; + } + close(socket_fd); + return vfio_container_fd; + } + + return -1; +} + +int +vfio_get_group_no(const char *sysfs_base, + const char *dev_addr, int *iommu_group_no) +{ + char linkname[PATH_MAX]; + char filename[PATH_MAX]; + char *tok[16], *group_tok, *end; + int ret; + + memset(linkname, 0, sizeof(linkname)); + memset(filename, 0, sizeof(filename)); + + /* try to find out IOMMU group for this device */ + snprintf(linkname, sizeof(linkname), + "%s/%s/iommu_group", sysfs_base, dev_addr); + + ret = readlink(linkname, filename, sizeof(filename)); + + /* if the link doesn't exist, no VFIO for us */ + if (ret < 0) + return 0; + + ret = rte_strsplit(filename, sizeof(filename), + tok, RTE_DIM(tok), '/'); + + if (ret <= 0) { + RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr); + return -1; + } + + /* IOMMU group is always the last token */ + errno = 0; + group_tok = tok[ret - 1]; + end = group_tok; + *iommu_group_no = strtol(group_tok, &end, 10); + if ((end != group_tok && *end != '\0') || errno != 0) { + RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr); + return -1; + } + + return 1; +} + +static int +vfio_type1_dma_map(int vfio_container_fd) +{ + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + int i, ret; + + /* map all DPDK segments for DMA. 
use 1:1 PA to IOVA mapping */ + for (i = 0; i < RTE_MAX_MEMSEG; i++) { + struct vfio_iommu_type1_dma_map dma_map; + + if (ms[i].addr == NULL) + break; + + memset(&dma_map, 0, sizeof(dma_map)); + dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map); + dma_map.vaddr = ms[i].addr_64; + dma_map.size = ms[i].len; + dma_map.iova = ms[i].phys_addr; + dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; + + ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map); + + if (ret) { + RTE_LOG(ERR, EAL, " cannot set up DMA remapping, " + "error %i (%s)\n", errno, strerror(errno)); + return -1; + } + } + + return 0; +} + +static int +vfio_noiommu_dma_map(int __rte_unused vfio_container_fd) +{ + /* No-IOMMU mode does not need DMA mapping */ + return 0; +} + +#endif diff --git a/lib/librte_eal/linuxapp/eal/eal_vfio.h b/lib/librte_eal/linuxapp/eal/eal_vfio.h index f483bf40..29f7f3ec 100644 --- a/lib/librte_eal/linuxapp/eal/eal_vfio.h +++ b/lib/librte_eal/linuxapp/eal/eal_vfio.h @@ -60,6 +60,100 @@ #define RTE_VFIO_NOIOMMU VFIO_NOIOMMU_IOMMU #endif +#define VFIO_MAX_GROUPS 64 + +/* + * Function prototypes for VFIO multiprocess sync functions + */ +int vfio_mp_sync_send_request(int socket, int req); +int vfio_mp_sync_receive_request(int socket); +int vfio_mp_sync_send_fd(int socket, int fd); +int vfio_mp_sync_receive_fd(int socket); +int vfio_mp_sync_connect_to_primary(void); + +/* + * we don't need to store device fd's anywhere since they can be obtained from + * the group fd via an ioctl() call. + */ +struct vfio_group { + int group_no; + int fd; +}; + +struct vfio_config { + int vfio_enabled; + int vfio_container_fd; + int vfio_container_has_dma; + int vfio_group_idx; + struct vfio_group vfio_groups[VFIO_MAX_GROUPS]; +}; + +#define VFIO_DIR "/dev/vfio" +#define VFIO_CONTAINER_PATH "/dev/vfio/vfio" +#define VFIO_GROUP_FMT "/dev/vfio/%u" +#define VFIO_NOIOMMU_GROUP_FMT "/dev/vfio/noiommu-%u" +#define VFIO_GET_REGION_ADDR(x) ((uint64_t) x << 40ULL) +#define VFIO_GET_REGION_IDX(x) (x >> 40) + +/* DMA mapping function prototype. + * Takes VFIO container fd as a parameter. + * Returns 0 on success, -1 on error. + * */ +typedef int (*vfio_dma_func_t)(int); + +struct vfio_iommu_type { + int type_id; + const char *name; + vfio_dma_func_t dma_map_func; +}; + +/* pick IOMMU type. returns a pointer to vfio_iommu_type or NULL for error */ +const struct vfio_iommu_type * +vfio_set_iommu_type(int vfio_container_fd); + +/* check if we have any supported extensions */ +int +vfio_has_supported_extensions(int vfio_container_fd); + +/* open container fd or get an existing one */ +int +vfio_get_container_fd(void); + +/* parse IOMMU group number for a device + * returns 1 on success, -1 for errors, 0 for non-existent group + */ +int +vfio_get_group_no(const char *sysfs_base, + const char *dev_addr, int *iommu_group_no); + +/* open group fd or get an existing one */ +int +vfio_get_group_fd(int iommu_group_no); + +/** + * Setup vfio_cfg for the device identified by its address. It discovers + * the configured I/O MMU groups or sets a new one for the device. If a new + * groups is assigned, the DMA mapping is performed. + * Returns 0 on success, a negative value on failure and a positive value in + * case the given device cannot be managed this way. 
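 * A positive return value is not an error: it tells the caller that the
 * device exists but is not bound to VFIO, so the device should simply
 * be skipped rather than treated as a failure.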
+ */ +int vfio_setup_device(const char *sysfs_base, const char *dev_addr, + int *vfio_dev_fd, struct vfio_device_info *device_info); + +int vfio_enable(const char *modname); +int vfio_is_enabled(const char *modname); + +int pci_vfio_enable(void); +int pci_vfio_is_enabled(void); + +int vfio_mp_sync_setup(void); + +#define SOCKET_REQ_CONTAINER 0x100 +#define SOCKET_REQ_GROUP 0x200 +#define SOCKET_OK 0x0 +#define SOCKET_NO_FD 0x1 +#define SOCKET_ERR 0xFF + #define VFIO_PRESENT #endif /* kernel version */ #endif /* RTE_EAL_VFIO */ diff --git a/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c index d54ded88..00cf919b 100644 --- a/lib/librte_eal/linuxapp/eal/eal_pci_vfio_mp_sync.c +++ b/lib/librte_eal/linuxapp/eal/eal_vfio_mp_sync.c @@ -265,7 +265,7 @@ vfio_mp_sync_connect_to_primary(void) * socket listening thread for primary process */ static __attribute__((noreturn)) void * -pci_vfio_mp_sync_thread(void __rte_unused * arg) +vfio_mp_sync_thread(void __rte_unused * arg) { int ret, fd, vfio_group_no; @@ -296,7 +296,7 @@ pci_vfio_mp_sync_thread(void __rte_unused * arg) switch (ret) { case SOCKET_REQ_CONTAINER: - fd = pci_vfio_get_container_fd(); + fd = vfio_get_container_fd(); if (fd < 0) vfio_mp_sync_send_request(conn_sock, SOCKET_ERR); else @@ -310,7 +310,7 @@ pci_vfio_mp_sync_thread(void __rte_unused * arg) continue; } - fd = pci_vfio_get_group_fd(vfio_group_no); + fd = vfio_get_group_fd(vfio_group_no); if (fd < 0) vfio_mp_sync_send_request(conn_sock, SOCKET_ERR); @@ -376,7 +376,7 @@ vfio_mp_sync_socket_setup(void) * set up a local socket and tell it to listen for incoming connections */ int -pci_vfio_mp_sync_setup(void) +vfio_mp_sync_setup(void) { int ret; char thread_name[RTE_MAX_THREAD_NAME_LEN]; @@ -387,7 +387,7 @@ pci_vfio_mp_sync_setup(void) } ret = pthread_create(&socket_thread, NULL, - pci_vfio_mp_sync_thread, NULL); + vfio_mp_sync_thread, NULL); if (ret) { RTE_LOG(ERR, EAL, "Failed to create thread for communication with secondary processes!\n"); @@ -396,7 +396,7 @@ pci_vfio_mp_sync_setup(void) } /* Set thread_name for aid in debugging. 
*/ - snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "pci-vfio-sync"); + snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "vfio-sync"); ret = rte_thread_setname(socket_thread, thread_name); if (ret) RTE_LOG(DEBUG, EAL, diff --git a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c index 0b612bb1..bddbdb07 100644 --- a/lib/librte_eal/linuxapp/eal/eal_xen_memory.c +++ b/lib/librte_eal/linuxapp/eal/eal_xen_memory.c @@ -167,8 +167,8 @@ rte_xen_mem_phy2mch(int32_t memseg_id, const phys_addr_t phy_addr) if (memseg_id == -1) { for (i = 0; i < RTE_MAX_MEMSEG; i++) { if ((phy_addr >= memseg[i].phys_addr) && - (phys_addr < memseg[i].phys_addr + - memseg[i].size)) { + (phy_addr < memseg[i].phys_addr + + memseg[i].len)) { memseg_id = i; break; } diff --git a/lib/librte_eal/linuxapp/eal/rte_eal_version.map b/lib/librte_eal/linuxapp/eal/rte_eal_version.map index 05134673..a617b9e4 100644 --- a/lib/librte_eal/linuxapp/eal/rte_eal_version.map +++ b/lib/librte_eal/linuxapp/eal/rte_eal_version.map @@ -138,6 +138,7 @@ DPDK_2.2 { rte_keepalive_mark_alive; rte_keepalive_register_core; rte_xen_dom0_supported; + rte_xen_mem_phy2mch; } DPDK_2.1; diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c index 45a5720e..df41e457 100644 --- a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c +++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c @@ -342,16 +342,6 @@ igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) goto fail_free; } - /* - * reserve device's PCI memory regions for use by this - * module - */ - err = pci_request_regions(dev, "igb_uio"); - if (err != 0) { - dev_err(&dev->dev, "Cannot request regions\n"); - goto fail_disable; - } - /* enable bus mastering on the device */ pci_set_master(dev); @@ -441,8 +431,6 @@ fail_release_iomem: igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(udev->pdev); - pci_release_regions(dev); -fail_disable: pci_disable_device(dev); fail_free: kfree(udev); @@ -460,7 +448,6 @@ igbuio_pci_remove(struct pci_dev *dev) igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(dev); - pci_release_regions(dev); pci_disable_device(dev); pci_set_drvdata(dev, NULL); kfree(udev); diff --git a/lib/librte_ether/rte_dev_info.h b/lib/librte_ether/rte_dev_info.h index 291bd4d7..574683d3 100644 --- a/lib/librte_ether/rte_dev_info.h +++ b/lib/librte_ether/rte_dev_info.h @@ -41,6 +41,7 @@ struct rte_dev_reg_info { void *data; /**< Buffer for return registers */ uint32_t offset; /**< Start register table location for access */ uint32_t length; /**< Number of registers to fetch */ + uint32_t width; /**< Size of device register */ uint32_t version; /**< Device version */ }; diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index eac260f1..0a6e3f18 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -1536,6 +1536,7 @@ rte_eth_xstats_get_names(uint8_t port_id, struct rte_eth_dev *dev; int cnt_used_entries; int cnt_expected_entries; + int cnt_driver_entries; uint32_t idx, id_queue; cnt_expected_entries = get_xstats_count(port_id); @@ -1545,16 +1546,7 @@ rte_eth_xstats_get_names(uint8_t port_id, /* port_id checked in get_xstats_count() */ dev = &rte_eth_devices[port_id]; - if (dev->dev_ops->xstats_get_names != NULL) { - cnt_used_entries = (*dev->dev_ops->xstats_get_names)( - dev, xstats_names, size); - if (cnt_used_entries < 0) - return cnt_used_entries; - } else - /* Driver itself 
does not support extended stats, but - * still have basic stats. - */ - cnt_used_entries = 0; + cnt_used_entries = 0; for (idx = 0; idx < RTE_NB_STATS; idx++) { snprintf(xstats_names[cnt_used_entries].name, @@ -1581,6 +1573,20 @@ rte_eth_xstats_get_names(uint8_t port_id, cnt_used_entries++; } } + + if (dev->dev_ops->xstats_get_names != NULL) { + /* If there are any driver-specific xstats, append them + * to end of list. + */ + cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( + dev, + xstats_names + cnt_used_entries, + size - cnt_used_entries); + if (cnt_driver_entries < 0) + return cnt_driver_entries; + cnt_used_entries += cnt_driver_entries; + } + return cnt_used_entries; } @@ -1628,7 +1634,6 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, stats_ptr = RTE_PTR_ADD(ð_stats, rte_stats_strings[i].offset); val = *stats_ptr; - xstats[count].id = count + xcount; xstats[count++].value = val; } @@ -1639,7 +1644,6 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, rte_rxq_stats_strings[i].offset + q * sizeof(uint64_t)); val = *stats_ptr; - xstats[count].id = count + xcount; xstats[count++].value = val; } } @@ -1651,11 +1655,13 @@ rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, rte_txq_stats_strings[i].offset + q * sizeof(uint64_t)); val = *stats_ptr; - xstats[count].id = count + xcount; xstats[count++].value = val; } } + for (i = 0; i < count + xcount; i++) + xstats[i].id = i; + return count + xcount; } @@ -3305,18 +3311,6 @@ rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp) } int -rte_eth_dev_get_reg_length(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg_length, -ENOTSUP); - return (*dev->dev_ops->get_reg_length)(dev); -} - -int rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info) { struct rte_eth_dev *dev; diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h index 0f173231..4dac364a 100644 --- a/lib/librte_ether/rte_ethdev.h +++ b/lib/librte_ether/rte_ethdev.h @@ -1321,9 +1321,6 @@ typedef int (*eth_timesync_write_time)(struct rte_eth_dev *dev, const struct timespec *timestamp); /**< @internal Function used to get time from the device clock */ -typedef int (*eth_get_reg_length_t)(struct rte_eth_dev *dev); -/**< @internal Retrieve device register count */ - typedef int (*eth_get_reg_t)(struct rte_eth_dev *dev, struct rte_dev_reg_info *info); /**< @internal Retrieve registers */ @@ -1487,8 +1484,6 @@ struct eth_dev_ops { /** Query redirection table. */ reta_query_t reta_query; - eth_get_reg_length_t get_reg_length; - /**< Get # of registers */ eth_get_reg_t get_reg; /**< Get registers */ eth_get_eeprom_length_t get_eeprom_length; @@ -4061,25 +4056,15 @@ int rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id, struct rte_eth_txq_info *qinfo); /** - * Retrieve number of available registers for access - * - * @param port_id - * The port identifier of the Ethernet device. - * @return - * - (>=0) number of registers if successful. - * - (-ENOTSUP) if hardware doesn't support. - * - (-ENODEV) if *port_id* invalid. - * - others depends on the specific operations implementation. 
- */ -int rte_eth_dev_get_reg_length(uint8_t port_id); - -/** - * Retrieve device registers and register attributes + * Retrieve device registers and register attributes (number of registers and + * register size) * * @param port_id * The port identifier of the Ethernet device. * @param info - * The template includes buffer for register data and attribute to be filled. + * Pointer to rte_dev_reg_info structure to fill in. If info->data is + * NULL the function fills in the width and length fields. If non-NULL + * the registers are put into the buffer pointed at by the data field. * @return * - (0) if successful. * - (-ENOTSUP) if hardware doesn't support. diff --git a/lib/librte_ether/rte_ether_version.map b/lib/librte_ether/rte_ether_version.map index e1ccebe0..45ddf44c 100644 --- a/lib/librte_ether/rte_ether_version.map +++ b/lib/librte_ether/rte_ether_version.map @@ -36,7 +36,6 @@ DPDK_2.2 { rte_eth_dev_get_eeprom_length; rte_eth_dev_get_mtu; rte_eth_dev_get_reg_info; - rte_eth_dev_get_reg_length; rte_eth_dev_get_vlan_offload; rte_eth_devices; rte_eth_dev_info_get; diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c index e3cc3a7c..26e54f68 100644 --- a/lib/librte_hash/rte_cuckoo_hash.c +++ b/lib/librte_hash/rte_cuckoo_hash.c @@ -877,6 +877,26 @@ rte_hash_del_key(const struct rte_hash *h, const void *key) return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key)); } +int +rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position, + void **key) +{ + RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL); + + struct rte_hash_key *k, *keys = h->key_store; + k = (struct rte_hash_key *) ((char *) keys + (position + 1) * + h->key_entry_size); + *key = k->key; + + if (position != + __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key), + NULL)) { + return -ENOENT; + } + + return 0; +} + /* Lookup bulk stage 0: Prefetch input key */ static inline void lookup_stage0(unsigned *idx, uint64_t *lookup_mask, diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h index c9612fbd..622c1800 100644 --- a/lib/librte_hash/rte_hash.h +++ b/lib/librte_hash/rte_hash.h @@ -271,6 +271,24 @@ rte_hash_del_key(const struct rte_hash *h, const void *key); int32_t rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, hash_sig_t sig); +/** + * Find a key in the hash table given the position. + * This operation is multi-thread safe. + * + * @param h + * Hash table to get the key from. + * @param position + * Position returned when the key was inserted. + * @param key + * Output containing a pointer to the key + * @return + * - 0 if retrieved successfully + * - EINVAL if the parameters are invalid. + * - ENOENT if no valid key is found in the given position. + */ +int +rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position, + void **key); /** * Find a key-value pair in the hash table. 
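The reworked rte_eth_dev_get_reg_info() contract above folds the register count and register width into struct rte_dev_reg_info, which is why rte_eth_dev_get_reg_length() could be dropped. A minimal caller sketch under that contract; this is not taken from the patch, error handling is abbreviated, and the buffer sizing simply follows the new width/length description:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

static int
dump_port_registers(uint8_t port_id)
{
	struct rte_dev_reg_info info;

	/* first call: data == NULL, only the width and length fields are filled in */
	memset(&info, 0, sizeof(info));
	if (rte_eth_dev_get_reg_info(port_id, &info) != 0)
		return -1;

	/* second call: supply a buffer of length * width bytes */
	info.data = calloc(info.length, info.width);
	if (info.data == NULL)
		return -1;
	if (rte_eth_dev_get_reg_info(port_id, &info) != 0) {
		free(info.data);
		return -1;
	}

	printf("port %u: %u registers of %u bytes, version %u\n",
	       (unsigned)port_id, (unsigned)info.length,
	       (unsigned)info.width, (unsigned)info.version);
	free(info.data);
	return 0;
}

Likewise, the new rte_hash_get_key_with_position() documented just above turns the position returned by rte_hash_add_key() back into a pointer to the stored key. A small usage sketch, assuming an already initialized EAL and rte_jhash as the hash function (both are assumptions, not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <rte_hash.h>
#include <rte_jhash.h>

static void
position_lookup_demo(void)
{
	struct rte_hash_parameters params = {
		.name = "demo_hash",	/* arbitrary name for the sketch */
		.entries = 1024,
		.key_len = sizeof(uint32_t),
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};
	struct rte_hash *h = rte_hash_create(&params);
	uint32_t key = 42;
	void *stored_key;
	int32_t pos;

	if (h == NULL)
		return;

	/* rte_hash_add_key() returns the position of the stored key */
	pos = rte_hash_add_key(h, &key);
	if (pos >= 0 &&
	    rte_hash_get_key_with_position(h, pos, &stored_key) == 0)
		printf("key at position %d is %u\n", pos,
		       *(uint32_t *)stored_key);

	rte_hash_free(h);
}

Per the comment block above, the call fails with EINVAL for bad parameters and with ENOENT when the slot at that position no longer holds a valid key.
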
diff --git a/lib/librte_hash/rte_hash_version.map b/lib/librte_hash/rte_hash_version.map index 4f25436e..52a2576f 100644 --- a/lib/librte_hash/rte_hash_version.map +++ b/lib/librte_hash/rte_hash_version.map @@ -38,3 +38,10 @@ DPDK_2.2 { rte_hash_set_cmp_func; } DPDK_2.1; + +DPDK_16.07 { + global: + + rte_hash_get_key_with_position; + +} DPDK_2.2; diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c index 601e5282..4846b897 100644 --- a/lib/librte_mbuf/rte_mbuf.c +++ b/lib/librte_mbuf/rte_mbuf.c @@ -156,6 +156,7 @@ rte_pktmbuf_pool_create(const char *name, unsigned n, struct rte_mempool *mp; struct rte_pktmbuf_pool_private mbp_priv; unsigned elt_size; + int ret; if (RTE_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN) != priv_size) { RTE_LOG(ERR, MBUF, "mbuf priv_size=%u is not aligned\n", @@ -181,8 +182,10 @@ rte_pktmbuf_pool_create(const char *name, unsigned n, } rte_pktmbuf_pool_init(mp, &mbp_priv); - if (rte_mempool_populate_default(mp) < 0) { + ret = rte_mempool_populate_default(mp); + if (ret < 0) { rte_mempool_free(mp); + rte_errno = -ret; return NULL; } diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c index d78d02b7..6ec09063 100644 --- a/lib/librte_mempool/rte_mempool.c +++ b/lib/librte_mempool/rte_mempool.c @@ -524,7 +524,11 @@ rte_mempool_populate_default(struct rte_mempool *mp) if (mp->nb_mem_chunks != 0) return -EEXIST; - if (rte_eal_has_hugepages()) { + if (rte_xen_dom0_supported()) { + pg_sz = RTE_PGSIZE_2M; + pg_shift = rte_bsf32(pg_sz); + align = pg_sz; + } else if (rte_eal_has_hugepages()) { pg_shift = 0; /* not needed, zone is physically contiguous */ pg_sz = 0; align = RTE_CACHE_LINE_SIZE; diff --git a/lib/librte_pdump/rte_pdump.c b/lib/librte_pdump/rte_pdump.c index ee566cb2..22ed4769 100644 --- a/lib/librte_pdump/rte_pdump.c +++ b/lib/librte_pdump/rte_pdump.c @@ -677,7 +677,7 @@ pdump_create_client_socket(struct pdump_request *p) RTE_LOG(ERR, PDUMP, "Failed to get client socket path: %s:%d\n", __func__, __LINE__); - return -1; + goto exit; } addr.sun_family = AF_UNIX; addr_len = sizeof(struct sockaddr_un); @@ -728,6 +728,7 @@ pdump_create_client_socket(struct pdump_request *p) ret = server_resp.err_value; } while (0); +exit: close(socket_fd); unlink(addr.sun_path); return ret; diff --git a/lib/librte_power/channel_commands.h b/lib/librte_power/channel_commands.h index de6d9269..383897bf 100644 --- a/lib/librte_power/channel_commands.h +++ b/lib/librte_power/channel_commands.h @@ -40,13 +40,6 @@ extern "C" { #include <stdint.h> -/* Maximum number of CPUs */ -#define CHANNEL_CMDS_MAX_CPUS 64 -#if CHANNEL_CMDS_MAX_CPUS > 64 -#error Maximum number of cores is 64, overflow is guaranteed to \ - cause problems with VM Power Management -#endif - /* Maximum number of channels per VM */ #define CHANNEL_CMDS_MAX_VM_CHANNELS 64 diff --git a/lib/librte_table/Makefile b/lib/librte_table/Makefile index 7f02af3c..7a8a3f3c 100644 --- a/lib/librte_table/Makefile +++ b/lib/librte_table/Makefile @@ -80,6 +80,5 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_lpm ifeq ($(CONFIG_RTE_LIBRTE_ACL),y) DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_acl endif -DEPDIRS-$(CONFIG_RTE_LIBRTE_TABLE) += lib/librte_hash include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_vhost/vhost_user/vhost-net-user.c b/lib/librte_vhost/vhost_user/vhost-net-user.c index 94f1b923..a0d83f3c 100644 --- a/lib/librte_vhost/vhost_user/vhost-net-user.c +++ b/lib/librte_vhost/vhost_user/vhost-net-user.c @@ -439,6 +439,7 @@ create_unix_socket(const char *path, struct sockaddr_un 
*un, bool is_server) memset(un, 0, sizeof(*un)); un->sun_family = AF_UNIX; strncpy(un->sun_path, path, sizeof(un->sun_path)); + un->sun_path[sizeof(un->sun_path) - 1] = '\0'; return fd; } @@ -576,6 +577,12 @@ vhost_user_create_client(struct vhost_user_socket *vsocket) RTE_LOG(ERR, VHOST_CONFIG, "%s: reconnecting...\n", path); reconn = malloc(sizeof(*reconn)); + if (reconn == NULL) { + RTE_LOG(ERR, VHOST_CONFIG, + "failed to allocate memory for reconnect\n"); + close(fd); + return -1; + } reconn->un = un; reconn->fd = fd; reconn->vsocket = vsocket; @@ -617,8 +624,11 @@ rte_vhost_driver_register(const char *path, uint64_t flags) if ((flags & RTE_VHOST_USER_CLIENT) != 0) { vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT); if (vsocket->reconnect && reconn_tid == 0) { - if (vhost_user_reconnect_init() < 0) + if (vhost_user_reconnect_init() < 0) { + free(vsocket->path); + free(vsocket); goto out; + } } ret = vhost_user_create_client(vsocket); } else { diff --git a/mk/internal/rte.compile-pre.mk b/mk/internal/rte.compile-pre.mk index b9bff4a7..9c25ff6c 100644 --- a/mk/internal/rte.compile-pre.mk +++ b/mk/internal/rte.compile-pre.mk @@ -84,10 +84,22 @@ C_TO_O = $(CC) -Wp,-MD,$(call obj2dep,$(@)).tmp $(CFLAGS) \ C_TO_O_STR = $(subst ','\'',$(C_TO_O)) #'# fix syntax highlight C_TO_O_DISP = $(if $(V),"$(C_TO_O_STR)"," CC $(@)") endif +PMDINFO_GEN = $(RTE_SDK_BIN)/app/pmdinfogen $@ $@.pmd.c +PMDINFO_CC = $(CC) $(CFLAGS) -c -o $@.pmd.o $@.pmd.c +PMDINFO_LD = $(CROSS)ld $(LDFLAGS) -r -o $@.o $@.pmd.o $@ +PMDINFO_TO_O = if grep -q 'PMD_REGISTER_DRIVER(.*)' $<; then \ + echo "$(if $V,$(PMDINFO_GEN), PMDINFO $@.pmd.c)" && \ + $(PMDINFO_GEN) && \ + echo "$(if $V,$(PMDINFO_CC), CC $@.pmd.o)" && \ + $(PMDINFO_CC) && \ + echo "$(if $V,$(PMDINFO_LD), LD $@)" && \ + $(PMDINFO_LD) && \ + mv -f $@.o $@; fi C_TO_O_CMD = 'cmd_$@ = $(C_TO_O_STR)' C_TO_O_DO = @set -e; \ echo $(C_TO_O_DISP); \ $(C_TO_O) && \ + $(PMDINFO_TO_O) && \ echo $(C_TO_O_CMD) > $(call obj2cmd,$(@)) && \ sed 's,'$@':,dep_'$@' =,' $(call obj2dep,$(@)).tmp > $(call obj2dep,$(@)) && \ rm -f $(call obj2dep,$(@)).tmp diff --git a/mk/rte.hostapp.mk b/mk/rte.hostapp.mk index c44d0f8a..07b391c2 100644 --- a/mk/rte.hostapp.mk +++ b/mk/rte.hostapp.mk @@ -41,7 +41,7 @@ include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk VPATH += $(SRCDIR) _BUILD = $(HOSTAPP) -_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/hostapp/$(HOSTAPP) +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/app/$(HOSTAPP) _CLEAN = doclean .PHONY: all @@ -95,10 +95,10 @@ $(HOSTAPP): $(OBJS-y) $(LDLIBS_FILES) FORCE # # install app in $(RTE_OUTPUT)/hostapp # -$(RTE_OUTPUT)/hostapp/$(HOSTAPP): $(HOSTAPP) +$(RTE_OUTPUT)/app/$(HOSTAPP): $(HOSTAPP) @echo " INSTALL-HOSTAPP $(HOSTAPP)" - @[ -d $(RTE_OUTPUT)/hostapp ] || mkdir -p $(RTE_OUTPUT)/hostapp - $(Q)cp -f $(HOSTAPP) $(RTE_OUTPUT)/hostapp + @[ -d $(RTE_OUTPUT)/app ] || mkdir -p $(RTE_OUTPUT)/app + $(Q)cp -f $(HOSTAPP) $(RTE_OUTPUT)/app # # Clean all generated files diff --git a/mk/rte.sdkbuild.mk b/mk/rte.sdkbuild.mk index eec52412..23fcf1e1 100644 --- a/mk/rte.sdkbuild.mk +++ b/mk/rte.sdkbuild.mk @@ -49,6 +49,7 @@ $(1): $(sort $(LOCAL_DEPDIRS-$(1))) endef $(foreach d,$(ROOTDIRS-y),$(eval $(call depdirs_rule,$(d)))) +drivers: | buildtools # # build and clean targets @@ -63,7 +64,7 @@ build: $(ROOTDIRS-y) .PHONY: clean clean: $(CLEANDIRS) @rm -rf $(RTE_OUTPUT)/include $(RTE_OUTPUT)/app \ - $(RTE_OUTPUT)/hostapp $(RTE_OUTPUT)/lib \ + $(RTE_OUTPUT)/lib \ $(RTE_OUTPUT)/hostlib $(RTE_OUTPUT)/kmod @[ -d 
$(RTE_OUTPUT)/include ] || mkdir -p $(RTE_OUTPUT)/include @$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \ diff --git a/mk/rte.sdkconfig.mk b/mk/rte.sdkconfig.mk index a3acfe64..e93237fe 100644 --- a/mk/rte.sdkconfig.mk +++ b/mk/rte.sdkconfig.mk @@ -79,11 +79,20 @@ $(RTE_OUTPUT): ifdef NODOTCONF $(RTE_OUTPUT)/.config: ; else +# Generate config from template, if there are duplicates keep only the last. +# To do so the temp config is checked for duplicate keys with cut/sort/uniq +# Then for each of those identified duplicates as long as there are more than +# just one left the last match is removed. $(RTE_OUTPUT)/.config: $(RTE_CONFIG_TEMPLATE) FORCE | $(RTE_OUTPUT) $(Q)if [ "$(RTE_CONFIG_TEMPLATE)" != "" -a -f "$(RTE_CONFIG_TEMPLATE)" ]; then \ $(CPP) -undef -P -x assembler-with-cpp \ -ffreestanding \ -o $(RTE_OUTPUT)/.config_tmp $(RTE_CONFIG_TEMPLATE) ; \ + for config in $$(grep -v "^#" $(RTE_OUTPUT)/.config_tmp | cut -d"=" -f1 | sort | uniq -d); do \ + while [ $$(grep "^$${config}=" $(RTE_OUTPUT)/.config_tmp -c ) -gt 1 ]; do \ + sed -i "0,/^$${config}=/{//d}" $(RTE_OUTPUT)/.config_tmp; \ + done; \ + done; \ if ! cmp -s $(RTE_OUTPUT)/.config_tmp $(RTE_OUTPUT)/.config; then \ cp $(RTE_OUTPUT)/.config_tmp $(RTE_OUTPUT)/.config ; \ cp $(RTE_OUTPUT)/.config_tmp $(RTE_OUTPUT)/.config.orig ; \ @@ -108,7 +117,7 @@ $(RTE_OUTPUT)/Makefile: | $(RTE_OUTPUT) # if NODOTCONF variable is defined, don't try to rebuild .config $(RTE_OUTPUT)/include/rte_config.h: $(RTE_OUTPUT)/.config $(Q)rm -rf $(RTE_OUTPUT)/include $(RTE_OUTPUT)/app \ - $(RTE_OUTPUT)/hostapp $(RTE_OUTPUT)/lib \ + $(RTE_OUTPUT)/lib \ $(RTE_OUTPUT)/hostlib $(RTE_OUTPUT)/kmod $(RTE_OUTPUT)/build $(Q)mkdir -p $(RTE_OUTPUT)/include $(Q)$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \ diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk index abdab0f0..7cd352c7 100644 --- a/mk/rte.sdkinstall.mk +++ b/mk/rte.sdkinstall.mk @@ -117,6 +117,7 @@ install-runtime: $(Q)cp -a $O/lib/* $(DESTDIR)$(libdir) $(Q)$(call rte_mkdir, $(DESTDIR)$(bindir)) $(Q)tar -cf - -C $O --exclude 'app/*.map' \ + --exclude app/pmdinfogen \ --exclude 'app/cmdline*' --exclude app/test \ --exclude app/testacl --exclude app/testpipeline app | \ tar -xf - -C $(DESTDIR)$(bindir) --strip-components=1 \ @@ -126,6 +127,8 @@ install-runtime: $(Q)$(call rte_mkdir, $(DESTDIR)$(sbindir)) $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/tools/dpdk_nic_bind.py, \ $(DESTDIR)$(sbindir)/dpdk_nic_bind) + $(Q)$(call rte_symlink, $(DESTDIR)$(datadir)/tools/pmdinfo.py, \ + $(DESTDIR)$(bindir)/dpdk_pmdinfo) install-kmod: ifneq ($(wildcard $O/kmod/*),) @@ -141,8 +144,9 @@ install-sdk: $(Q)$(call rte_mkdir, $(DESTDIR)$(sdkdir)) $(Q)cp -a $(RTE_SDK)/mk $(DESTDIR)$(sdkdir) $(Q)cp -a $(RTE_SDK)/scripts $(DESTDIR)$(sdkdir) - $(Q)$(call rte_mkdir, $(DESTDIR)$(targetdir)) + $(Q)$(call rte_mkdir, $(DESTDIR)$(targetdir)/app) $(Q)cp -a $O/.config $(DESTDIR)$(targetdir) + $(Q)cp -a $O/app/pmdinfogen $(DESTDIR)$(targetdir)/app $(Q)$(call rte_symlink, $(DESTDIR)$(includedir), $(DESTDIR)$(targetdir)/include) $(Q)$(call rte_symlink, $(DESTDIR)$(libdir), $(DESTDIR)$(targetdir)/lib) diff --git a/scripts/check-git-log.sh b/scripts/check-git-log.sh index 7d2c7ee8..e416aea1 100755 --- a/scripts/check-git-log.sh +++ b/scripts/check-git-log.sh @@ -49,10 +49,12 @@ fi range=${1:-origin/master..} +commits=$(git log --format='%h' $range) headlines=$(git log --format='%s' $range) bodylines=$(git log --format='%b' $range) -tags=$(git log --format='%b' $range | grep -i -e 'by *:' -e 'fix.*:') fixes=$(git 
log --format='%h %s' $range | grep -i ': *fix' | cut -d' ' -f1) +tags=$(git log --format='%b' $range | grep -i -e 'by *:' -e 'fix.*:') +bytag='\(Reported\|Suggested\|Signed-off\|Acked\|Reviewed\|Tested\)-by:' # check headline format (spacing, no punctuation, no code) bad=$(echo "$headlines" | grep --color=always \ @@ -62,12 +64,30 @@ bad=$(echo "$headlines" | grep --color=always \ -e '\.$' \ -e '[,;!?&|]' \ -e ':.*_' \ - -e '^[^:]*$' \ + -e '^[^:]\+$' \ -e ':[^ ]' \ -e ' :' \ | sed 's,^,\t,') [ -z "$bad" ] || printf "Wrong headline format:\n$bad\n" +# check headline prefix when touching only drivers, e.g. net/<driver name> +bad=$(for commit in $commits ; do + headline=$(git log --format='%s' -1 $commit) + files=$(git diff-tree --no-commit-id --name-only -r $commit) + [ -z "$(echo "$files" | grep -v '^\(drivers\|doc\|config\)/')" ] || + continue + drv=$(echo "$files" | grep '^drivers/' | cut -d "/" -f 2,3 | sort -u) + drvgrp=$(echo "$drv" | cut -d "/" -f 1 | uniq) + if [ $(echo "$drvgrp" | wc -l) -gt 1 ] ; then + echo "$headline" | grep -v '^drivers:' + elif [ $(echo "$drv" | wc -l) -gt 1 ] ; then + echo "$headline" | grep -v "^$drvgrp" + else + echo "$headline" | grep -v "^$drv" + fi +done | sed 's,^,\t,') +[ -z "$bad" ] || printf "Wrong headline prefix:\n$bad\n" + # check headline label for common typos bad=$(echo "$headlines" | grep --color=always \ -e '^example[:/]' \ @@ -89,11 +109,14 @@ bad=$(echo "$headlines" | grep --color=always \ bad=$(echo "$headlines" | grep -E --color=always \ -e '\<(rx|tx|RX|TX)\>' \ -e '\<[pv]f\>' \ + -e '\<[hsf]w\>' \ -e '\<l[234]\>' \ + -e ':.*\<api\>' \ -e ':.*\<dma\>' \ -e ':.*\<pci\>' \ -e ':.*\<mtu\>' \ -e ':.*\<mac\>' \ + -e ':.*\<numa\>' \ -e ':.*\<vlan\>' \ -e ':.*\<rss\>' \ -e ':.*\<freebsd\>' \ @@ -106,27 +129,44 @@ bad=$(echo "$headlines" | grep -E --color=always \ | sed 's,^,\t,') [ -z "$bad" ] || printf "Wrong headline lowercase:\n$bad\n" +# special case check for VMDq to give good error message +bad=$(echo "$headlines" | grep -E --color=always \ + -e '\<(vmdq|VMDQ)\>' \ + | sed 's,^,\t,') +[ -z "$bad" ] || printf "Wrong headline capitalization, use 'VMDq':\n$bad\n" + # check headline length (60 max) -bad=$(echo "$headlines" | awk 'length>60 {print}' | sed 's,^,\t,') +bad=$(echo "$headlines" | + awk 'length>60 {print}' | + sed 's,^,\t,') [ -z "$bad" ] || printf "Headline too long:\n$bad\n" # check body lines length (75 max) -bad=$(echo "$bodylines" | grep -v '^Fixes:' | awk 'length>75 {print}' | sed 's,^,\t,') +bad=$(echo "$bodylines" | grep -v '^Fixes:' | + awk 'length>75 {print}' | + sed 's,^,\t,') [ -z "$bad" ] || printf "Line too long:\n$bad\n" # check starting commit message with "It" -bad=$(echo "$bodylines" | head -n1 | grep -E --color=always \ - -ie '^It ' \ - | sed 's,^,\t,') +bad=$(for commit in $commits ; do + firstbodyline=$(git log --format='%b' -1 $commit | head -n1) + echo "$firstbodyline" | grep --color=always -ie '^It ' +done | sed 's,^,\t,') [ -z "$bad" ] || printf "Wrong beginning of commit message:\n$bad\n" # check tags spelling bad=$(echo "$tags" | - grep -v '^\(Reported\|Suggested\|Signed-off\|Acked\|Reviewed\|Tested\)-by: [^,]* <.*@.*>$' | + grep -v "^$bytag [^,]* <.*@.*>$" | grep -v '^Fixes: [0-9a-f]\{7\}[0-9a-f]* (".*")$' | sed 's,^.,\t&,') [ -z "$bad" ] || printf "Wrong tag:\n$bad\n" +# check blank line after last Fixes: tag +bad=$(echo "$bodylines" | + sed -n 'N;/\nFixes:/D;/\n$/D;/^Fixes:/P' | + sed 's,^.,\t&,') +[ -z "$bad" ] || printf "Missing blank line after 'Fixes' tag:\n$bad\n" + # check missing Fixes: tag 
bad=$(for fix in $fixes ; do git log --format='%b' -1 $fix | grep -q '^Fixes: ' || diff --git a/tools/pmdinfo.py b/tools/pmdinfo.py new file mode 100755 index 00000000..662034af --- /dev/null +++ b/tools/pmdinfo.py @@ -0,0 +1,638 @@ +#!/usr/bin/env python +# ------------------------------------------------------------------------- +# scripts/pmdinfo.py +# +# Utility to dump PMD_INFO_STRING support from an object file +# +# ------------------------------------------------------------------------- +import os +import sys +from optparse import OptionParser +import string +import json +import platform + +# For running from development directory. It should take precedence over the +# installed pyelftools. +sys.path.insert(0, '.') + + +from elftools import __version__ +from elftools.common.exceptions import ELFError +from elftools.common.py3compat import ( + ifilter, byte2int, bytes2str, itervalues, str2bytes) +from elftools.elf.elffile import ELFFile +from elftools.elf.dynamic import DynamicSection, DynamicSegment +from elftools.elf.enums import ENUM_D_TAG +from elftools.elf.segments import InterpSegment +from elftools.elf.sections import SymbolTableSection +from elftools.elf.gnuversions import ( + GNUVerSymSection, GNUVerDefSection, + GNUVerNeedSection, +) +from elftools.elf.relocation import RelocationSection +from elftools.elf.descriptions import ( + describe_ei_class, describe_ei_data, describe_ei_version, + describe_ei_osabi, describe_e_type, describe_e_machine, + describe_e_version_numeric, describe_p_type, describe_p_flags, + describe_sh_type, describe_sh_flags, + describe_symbol_type, describe_symbol_bind, describe_symbol_visibility, + describe_symbol_shndx, describe_reloc_type, describe_dyn_tag, + describe_ver_flags, +) +from elftools.elf.constants import E_FLAGS +from elftools.dwarf.dwarfinfo import DWARFInfo +from elftools.dwarf.descriptions import ( + describe_reg_name, describe_attr_value, set_global_machine_arch, + describe_CFI_instructions, describe_CFI_register_rule, + describe_CFI_CFA_rule, +) +from elftools.dwarf.constants import ( + DW_LNS_copy, DW_LNS_set_file, DW_LNE_define_file) +from elftools.dwarf.callframe import CIE, FDE + +raw_output = False +pcidb = None + +# =========================================== + + +class Vendor: + """ + Class for vendors. This is the top level class + for the devices belong to a specific vendor. + self.devices is the device dictionary + subdevices are in each device. + """ + + def __init__(self, vendorStr): + """ + Class initializes with the raw line from pci.ids + Parsing takes place inside __init__ + """ + self.ID = vendorStr.split()[0] + self.name = vendorStr.replace("%s " % self.ID, "").rstrip() + self.devices = {} + + def addDevice(self, deviceStr): + """ + Adds a device to self.devices + takes the raw line from pci.ids + """ + s = deviceStr.strip() + devID = s.split()[0] + if devID in self.devices: + pass + else: + self.devices[devID] = Device(deviceStr) + + def report(self): + print self.ID, self.name + for id, dev in self.devices.items(): + dev.report() + + def find_device(self, devid): + # convert to a hex string and remove 0x + devid = hex(devid)[2:] + try: + return self.devices[devid] + except: + return Device("%s Unknown Device" % devid) + + +class Device: + + def __init__(self, deviceStr): + """ + Class for each device. + Each vendor has its own devices dictionary. 
+ """ + s = deviceStr.strip() + self.ID = s.split()[0] + self.name = s.replace("%s " % self.ID, "") + self.subdevices = {} + + def report(self): + print "\t%s\t%s" % (self.ID, self.name) + for subID, subdev in self.subdevices.items(): + subdev.report() + + def addSubDevice(self, subDeviceStr): + """ + Adds a subvendor, subdevice to device. + Uses raw line from pci.ids + """ + s = subDeviceStr.strip() + spl = s.split() + subVendorID = spl[0] + subDeviceID = spl[1] + subDeviceName = s.split(" ")[-1] + devID = "%s:%s" % (subVendorID, subDeviceID) + self.subdevices[devID] = SubDevice( + subVendorID, subDeviceID, subDeviceName) + + def find_subid(self, subven, subdev): + subven = hex(subven)[2:] + subdev = hex(subdev)[2:] + devid = "%s:%s" % (subven, subdev) + + try: + return self.subdevices[devid] + except: + if (subven == "ffff" and subdev == "ffff"): + return SubDevice("ffff", "ffff", "(All Subdevices)") + else: + return SubDevice(subven, subdev, "(Unknown Subdevice)") + + +class SubDevice: + """ + Class for subdevices. + """ + + def __init__(self, vendor, device, name): + """ + Class initializes with vendorid, deviceid and name + """ + self.vendorID = vendor + self.deviceID = device + self.name = name + + def report(self): + print "\t\t%s\t%s\t%s" % (self.vendorID, self.deviceID, self.name) + + +class PCIIds: + """ + Top class for all pci.ids entries. + All queries will be asked to this class. + PCIIds.vendors["0e11"].devices["0046"].\ + subdevices["0e11:4091"].name = "Smart Array 6i" + """ + + def __init__(self, filename): + """ + Prepares the directories. + Checks local data file. + Tries to load from local, if not found, downloads from web + """ + self.version = "" + self.date = "" + self.vendors = {} + self.contents = None + self.readLocal(filename) + self.parse() + + def reportVendors(self): + """Reports the vendors + """ + for vid, v in self.vendors.items(): + print v.ID, v.name + + def report(self, vendor=None): + """ + Reports everything for all vendors or a specific vendor + PCIIds.report() reports everything + PCIIDs.report("0e11") reports only "Compaq Computer Corporation" + """ + if vendor is not None: + self.vendors[vendor].report() + else: + for vID, v in self.vendors.items(): + v.report() + + def find_vendor(self, vid): + # convert vid to a hex string and remove the 0x + vid = hex(vid)[2:] + + try: + return self.vendors[vid] + except: + return Vendor("%s Unknown Vendor" % (vid)) + + def findDate(self, content): + for l in content: + if l.find("Date:") > -1: + return l.split()[-2].replace("-", "") + return None + + def parse(self): + if len(self.contents) < 1: + print "data/%s-pci.ids not found" % self.date + else: + vendorID = "" + deviceID = "" + for l in self.contents: + if l[0] == "#": + continue + elif len(l.strip()) == 0: + continue + else: + if l.find("\t\t") == 0: + self.vendors[vendorID].devices[ + deviceID].addSubDevice(l) + elif l.find("\t") == 0: + deviceID = l.strip().split()[0] + self.vendors[vendorID].addDevice(l) + else: + vendorID = l.split()[0] + self.vendors[vendorID] = Vendor(l) + + def readLocal(self, filename): + """ + Reads the local file + """ + self.contents = open(filename).readlines() + self.date = self.findDate(self.contents) + + def loadLocal(self): + """ + Loads database from local. 
If there is no file, + it creates a new one from web + """ + self.date = idsfile[0].split("/")[1].split("-")[0] + self.readLocal() + + +# ======================================= + +def search_file(filename, search_path): + """ Given a search path, find file with requested name """ + for path in string.split(search_path, ":"): + candidate = os.path.join(path, filename) + if os.path.exists(candidate): + return os.path.abspath(candidate) + return None + + +class ReadElf(object): + """ display_* methods are used to emit output into the output stream + """ + + def __init__(self, file, output): + """ file: + stream object with the ELF file to read + + output: + output stream to write to + """ + self.elffile = ELFFile(file) + self.output = output + + # Lazily initialized if a debug dump is requested + self._dwarfinfo = None + + self._versioninfo = None + + def _section_from_spec(self, spec): + """ Retrieve a section given a "spec" (either number or name). + Return None if no such section exists in the file. + """ + try: + num = int(spec) + if num < self.elffile.num_sections(): + return self.elffile.get_section(num) + else: + return None + except ValueError: + # Not a number. Must be a name then + return self.elffile.get_section_by_name(str2bytes(spec)) + + def pretty_print_pmdinfo(self, pmdinfo): + global pcidb + + for i in pmdinfo["pci_ids"]: + vendor = pcidb.find_vendor(i[0]) + device = vendor.find_device(i[1]) + subdev = device.find_subid(i[2], i[3]) + print("%s (%s) : %s (%s) %s" % + (vendor.name, vendor.ID, device.name, + device.ID, subdev.name)) + + def parse_pmd_info_string(self, mystring): + global raw_output + global pcidb + + optional_pmd_info = [{'id': 'params', 'tag': 'PMD PARAMETERS'}] + + i = mystring.index("=") + mystring = mystring[i + 2:] + pmdinfo = json.loads(mystring) + + if raw_output: + print(pmdinfo) + return + + print("PMD NAME: " + pmdinfo["name"]) + for i in optional_pmd_info: + try: + print("%s: %s" % (i['tag'], pmdinfo[i['id']])) + except KeyError as e: + continue + + if (len(pmdinfo["pci_ids"]) != 0): + print("PMD HW SUPPORT:") + if pcidb is not None: + self.pretty_print_pmdinfo(pmdinfo) + else: + print("VENDOR\t DEVICE\t SUBVENDOR\t SUBDEVICE") + for i in pmdinfo["pci_ids"]: + print("0x%04x\t 0x%04x\t 0x%04x\t\t 0x%04x" % + (i[0], i[1], i[2], i[3])) + + print("") + + def display_pmd_info_strings(self, section_spec): + """ Display a strings dump of a section. section_spec is either a + section number or a name. 
+ """ + section = self._section_from_spec(section_spec) + if section is None: + return + + data = section.data() + dataptr = 0 + + while dataptr < len(data): + while (dataptr < len(data) and + not (32 <= byte2int(data[dataptr]) <= 127)): + dataptr += 1 + + if dataptr >= len(data): + break + + endptr = dataptr + while endptr < len(data) and byte2int(data[endptr]) != 0: + endptr += 1 + + mystring = bytes2str(data[dataptr:endptr]) + rc = mystring.find("PMD_INFO_STRING") + if (rc != -1): + self.parse_pmd_info_string(mystring) + + dataptr = endptr + + def find_librte_eal(self, section): + for tag in section.iter_tags(): + if tag.entry.d_tag == 'DT_NEEDED': + if "librte_eal" in tag.needed: + return tag.needed + return None + + def search_for_autoload_path(self): + scanelf = self + scanfile = None + library = None + + section = self._section_from_spec(".dynamic") + try: + eallib = self.find_librte_eal(section) + if eallib is not None: + ldlibpath = os.environ.get('LD_LIBRARY_PATH') + if ldlibpath is None: + ldlibpath = "" + dtr = self.get_dt_runpath(section) + library = search_file(eallib, + dtr + ":" + ldlibpath + + ":/usr/lib64:/lib64:/usr/lib:/lib") + if library is None: + return (None, None) + if raw_output is False: + print("Scanning for autoload path in %s" % library) + scanfile = open(library, 'rb') + scanelf = ReadElf(scanfile, sys.stdout) + except AttributeError: + # Not a dynamic binary + pass + except ELFError: + scanfile.close() + return (None, None) + + section = scanelf._section_from_spec(".rodata") + if section is None: + if scanfile is not None: + scanfile.close() + return (None, None) + + data = section.data() + dataptr = 0 + + while dataptr < len(data): + while (dataptr < len(data) and + not (32 <= byte2int(data[dataptr]) <= 127)): + dataptr += 1 + + if dataptr >= len(data): + break + + endptr = dataptr + while endptr < len(data) and byte2int(data[endptr]) != 0: + endptr += 1 + + mystring = bytes2str(data[dataptr:endptr]) + rc = mystring.find("DPDK_PLUGIN_PATH") + if (rc != -1): + rc = mystring.find("=") + return (mystring[rc + 1:], library) + + dataptr = endptr + if scanfile is not None: + scanfile.close() + return (None, None) + + def get_dt_runpath(self, dynsec): + for tag in dynsec.iter_tags(): + if tag.entry.d_tag == 'DT_RUNPATH': + return tag.runpath + return "" + + def process_dt_needed_entries(self): + """ Look to see if there are any DT_NEEDED entries in the binary + And process those if there are + """ + global raw_output + runpath = "" + ldlibpath = os.environ.get('LD_LIBRARY_PATH') + if ldlibpath is None: + ldlibpath = "" + + dynsec = self._section_from_spec(".dynamic") + try: + runpath = self.get_dt_runpath(dynsec) + except AttributeError: + # dynsec is None, just return + return + + for tag in dynsec.iter_tags(): + if tag.entry.d_tag == 'DT_NEEDED': + rc = tag.needed.find("librte_pmd") + if (rc != -1): + library = search_file(tag.needed, + runpath + ":" + ldlibpath + + ":/usr/lib64:/lib64:/usr/lib:/lib") + if library is not None: + if raw_output is False: + print("Scanning %s for pmd information" % library) + with open(library, 'rb') as file: + try: + libelf = ReadElf(file, sys.stdout) + except ELFError as e: + print("%s is no an ELF file" % library) + continue + libelf.process_dt_needed_entries() + libelf.display_pmd_info_strings(".rodata") + file.close() + + +def scan_autoload_path(autoload_path): + global raw_output + + if os.path.exists(autoload_path) is False: + return + + try: + dirs = os.listdir(autoload_path) + except OSError as e: + # Couldn't read the 
directory, give up + return + + for d in dirs: + dpath = os.path.join(autoload_path, d) + if os.path.isdir(dpath): + scan_autoload_path(dpath) + if os.path.isfile(dpath): + try: + file = open(dpath, 'rb') + readelf = ReadElf(file, sys.stdout) + except ELFError as e: + # this is likely not an elf file, skip it + continue + except IOError as e: + # No permission to read the file, skip it + continue + + if raw_output is False: + print("Hw Support for library %s" % d) + readelf.display_pmd_info_strings(".rodata") + file.close() + + +def scan_for_autoload_pmds(dpdk_path): + """ + search the specified application or path for a pmd autoload path + then scan said path for pmds and report hw support + """ + global raw_output + + if (os.path.isfile(dpdk_path) is False): + if raw_output is False: + print("Must specify a file name") + return + + file = open(dpdk_path, 'rb') + try: + readelf = ReadElf(file, sys.stdout) + except ElfError as e: + if raw_output is False: + print("Unable to parse %s" % file) + return + + (autoload_path, scannedfile) = readelf.search_for_autoload_path() + if (autoload_path is None or autoload_path is ""): + if (raw_output is False): + print("No autoload path configured in %s" % dpdk_path) + return + if (raw_output is False): + if (scannedfile is None): + scannedfile = dpdk_path + print("Found autoload path %s in %s" % (autoload_path, scannedfile)) + + file.close() + if (raw_output is False): + print("Discovered Autoload HW Support:") + scan_autoload_path(autoload_path) + return + + +def main(stream=None): + global raw_output + global pcidb + + pcifile_default = "./pci.ids" # for unknown OS's assume local file + if platform.system() == 'Linux': + pcifile_default = "/usr/share/hwdata/pci.ids" + elif platform.system() == 'FreeBSD': + pcifile_default = "/usr/local/share/pciids/pci.ids" + if not os.path.exists(pcifile_default): + pcifile_default = "/usr/share/misc/pci_vendors" + + optparser = OptionParser( + usage='usage: %prog [-hrtp] [-d <pci id file] <elf-file>', + description="Dump pmd hardware support info", + add_help_option=True, + prog='pmdinfo.py') + optparser.add_option('-r', '--raw', + action='store_true', dest='raw_output', + help='Dump raw json strings') + optparser.add_option("-d", "--pcidb", dest="pcifile", + help="specify a pci database " + "to get vendor names from", + default=pcifile_default, metavar="FILE") + optparser.add_option("-t", "--table", dest="tblout", + help="output information on hw support as a hex table", + action='store_true') + optparser.add_option("-p", "--plugindir", dest="pdir", + help="scan dpdk for autoload plugins", + action='store_true') + + options, args = optparser.parse_args() + + if options.raw_output: + raw_output = True + + if options.pcifile: + pcidb = PCIIds(options.pcifile) + if pcidb is None: + print("Pci DB file not found") + exit(1) + + if options.tblout: + options.pcifile = None + pcidb = None + + if (len(args) == 0): + optparser.print_usage() + exit(1) + + if options.pdir is True: + exit(scan_for_autoload_pmds(args[0])) + + ldlibpath = os.environ.get('LD_LIBRARY_PATH') + if (ldlibpath is None): + ldlibpath = "" + + if (os.path.exists(args[0]) is True): + myelffile = args[0] + else: + myelffile = search_file( + args[0], ldlibpath + ":/usr/lib64:/lib64:/usr/lib:/lib") + + if (myelffile is None): + print("File not found") + sys.exit(1) + + with open(myelffile, 'rb') as file: + try: + readelf = ReadElf(file, sys.stdout) + readelf.process_dt_needed_entries() + readelf.display_pmd_info_strings(".rodata") + sys.exit(0) + + except 
ELFError as ex: + sys.stderr.write('ELF error: %s\n' % ex) + sys.exit(1) + + +# ------------------------------------------------------------------------- +if __name__ == '__main__': + main()
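pmdinfo.py identifies PMD information by scanning .rodata for strings that begin with "PMD_INFO_STRING=" and feeding the JSON payload (a "name", optional "params", and "pci_ids" given as vendor/device/subvendor/subdevice tuples) to json.loads(). Purely for illustration, here is a C constant of the shape the script looks for; the real string is generated by buildtools/pmdinfogen, which is not part of this hunk, and every value below is invented:

#include <stdio.h>

/* A made-up example of the kind of string pmdinfo.py extracts from
 * .rodata; 32902 is 0x8086, and 65535 (0xffff) is what the script
 * reports as "All Subdevices" when a pci.ids database is loaded. */
static const char demo_pmd_info[] =
	"PMD_INFO_STRING= {"
	"\"name\": \"demo_pmd\", "
	"\"params\": \"arg0=<int>\", "
	"\"pci_ids\": [[32902, 5546, 65535, 65535]]}";

int main(void)
{
	puts(demo_pmd_info);	/* the script would pretty-print this instead */
	return 0;
}

Given a built driver object or application containing such a string, tools/pmdinfo.py <file> prints the PMD name and its hardware support table, resolving the numeric ids to vendor and device names when a pci.ids database is available (the -d option, with platform-specific defaults).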