102 files changed, 1240 insertions, 603 deletions
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index 7171bd63..7dbe2456 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -1840,7 +1840,7 @@ cmdline_parse_inst_t cmd_config_rxtx_queue = { .data = NULL, .help_str = "port X rxq|txq ID start|stop", .tokens = { - (void *)&cmd_config_speed_all_port, + (void *)&cmd_config_rxtx_queue_port, (void *)&cmd_config_rxtx_queue_portid, (void *)&cmd_config_rxtx_queue_rxtxq, (void *)&cmd_config_rxtx_queue_qid, diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 69fa04be..130ff148 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -163,15 +163,11 @@ nic_stats_display(portid_t port_id) struct rte_eth_stats stats; struct rte_port *port = &ports[port_id]; uint8_t i; - portid_t pid; static const char *nic_stats_border = "########################"; if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - FOREACH_PORT(pid, ports) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } rte_eth_stats_get(port_id, &stats); @@ -245,13 +241,8 @@ nic_stats_display(portid_t port_id) void nic_stats_clear(portid_t port_id) { - portid_t pid; - if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - FOREACH_PORT(pid, ports) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } rte_eth_stats_reset(port_id); @@ -325,15 +316,11 @@ nic_stats_mapping_display(portid_t port_id) { struct rte_port *port = &ports[port_id]; uint16_t i; - portid_t pid; static const char *nic_stats_mapping_border = "########################"; if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - FOREACH_PORT(pid, ports) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } @@ -445,13 +432,9 @@ port_infos_display(portid_t port_id) int vlan_offload; struct rte_mempool * mp; static const char *info_border = "*********************"; - portid_t pid; if (port_id_is_invalid(port_id, ENABLED_WARN)) { - printf("Valid port range is [0"); - FOREACH_PORT(pid, ports) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return; } port = &ports[port_id]; @@ -554,6 +537,17 @@ port_id_is_invalid(portid_t port_id, enum print_warning warning) return 1; } +void print_valid_ports(void) +{ + portid_t pid; + + printf("The valid ports array is ["); + FOREACH_PORT(pid, ports) { + printf(" %d", pid); + } + printf(" ]\n"); +} + static int vlan_id_is_invalid(uint16_t vlan_id) { diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 09233917..4a74ada2 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -370,7 +370,6 @@ parse_portnuma_config(const char *q_arg) }; unsigned long int_fld[_NUM_FLD]; char *str_fld[_NUM_FLD]; - portid_t pid; /* reset from value set at definition */ while ((p = strchr(p0,'(')) != NULL) { @@ -394,10 +393,7 @@ parse_portnuma_config(const char *q_arg) port_id = (portid_t)int_fld[FLD_PORT]; if (port_id_is_invalid(port_id, ENABLED_WARN) || port_id == (portid_t)RTE_PORT_ALL) { - printf("Valid port range is [0"); - FOREACH_PORT(pid, ports) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return -1; } socket_id = (uint8_t)int_fld[FLD_SOCKET]; @@ -429,7 +425,6 @@ parse_ringnuma_config(const char *q_arg) }; unsigned long int_fld[_NUM_FLD]; char *str_fld[_NUM_FLD]; - portid_t pid; #define RX_RING_ONLY 0x1 #define TX_RING_ONLY 0x2 #define RXTX_RING 0x3 @@ -456,10 +451,7 @@ parse_ringnuma_config(const char *q_arg) port_id = 
(portid_t)int_fld[FLD_PORT]; if (port_id_is_invalid(port_id, ENABLED_WARN) || port_id == (portid_t)RTE_PORT_ALL) { - printf("Valid port range is [0"); - FOREACH_PORT(pid, ports) - printf(", %d", pid); - printf("]\n"); + print_valid_ports(); return -1; } socket_id = (uint8_t)int_fld[FLD_SOCKET]; diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 9de01fed..4512ba92 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -675,18 +675,23 @@ init_fwd_streams(void) /* init new */ nb_fwd_streams = nb_fwd_streams_new; - fwd_streams = rte_zmalloc("testpmd: fwd_streams", - sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); - if (fwd_streams == NULL) - rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " - "failed\n", nb_fwd_streams); - - for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { - fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", - sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); - if (fwd_streams[sm_id] == NULL) - rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" - " failed\n"); + if (nb_fwd_streams) { + fwd_streams = rte_zmalloc("testpmd: fwd_streams", + sizeof(struct fwd_stream *) * nb_fwd_streams, + RTE_CACHE_LINE_SIZE); + if (fwd_streams == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" + " (struct fwd_stream *)) failed\n", + nb_fwd_streams); + + for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { + fwd_streams[sm_id] = rte_zmalloc("testpmd:" + " struct fwd_stream", sizeof(struct fwd_stream), + RTE_CACHE_LINE_SIZE); + if (fwd_streams[sm_id] == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc" + "(struct fwd_stream) failed\n"); + } } return 0; @@ -720,6 +725,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) pktnb_stats[1] = pktnb_stats[0]; burst_stats[0] = nb_burst; pktnb_stats[0] = nb_pkt; + } else if (nb_burst > burst_stats[1]) { + burst_stats[1] = nb_burst; + pktnb_stats[1] = nb_pkt; } } if (total_burst == 0) @@ -976,6 +984,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) } /* + * Update the forward ports list. + */ +void +update_fwd_ports(portid_t new_pid) +{ + unsigned int i; + unsigned int new_nb_fwd_ports = 0; + int move = 0; + + for (i = 0; i < nb_fwd_ports; ++i) { + if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN)) + move = 1; + else if (move) + fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i]; + else + new_nb_fwd_ports++; + } + if (new_pid < RTE_MAX_ETHPORTS) + fwd_ports_ids[new_nb_fwd_ports++] = new_pid; + + nb_fwd_ports = new_nb_fwd_ports; + nb_cfg_ports = new_nb_fwd_ports; +} + +/* * Launch packet forwarding configuration. */ void @@ -1010,10 +1043,6 @@ start_packet_forwarding(int with_tx_first) return; } - if (init_fwd_streams() < 0) { - printf("Fail from init_fwd_streams()\n"); - return; - } if(dcb_test) { for (i = 0; i < nb_fwd_ports; i++) { @@ -1033,10 +1062,11 @@ start_packet_forwarding(int with_tx_first) } test_done = 0; + fwd_config_setup(); + if(!no_flush_rx) flush_fwd_rx_queues(); - fwd_config_setup(); pkt_fwd_config_display(&cur_fwd_config); rxtx_config_display(); @@ -1572,6 +1602,8 @@ attach_port(char *identifier) ports[pi].port_status = RTE_PORT_STOPPED; + update_fwd_ports(pi); + printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); printf("Done\n"); } @@ -1594,6 +1626,8 @@ detach_port(uint8_t port_id) ports[port_id].enabled = 0; nb_ports = rte_eth_dev_count(); + update_fwd_ports(RTE_MAX_ETHPORTS); + printf("Port '%s' is detached. 
Now total ports is %d\n", name, nb_ports); printf("Done\n"); @@ -1857,7 +1891,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid) struct rte_port *port; port = &ports[slave_pid]; - return port->slave_flag; + if ((rte_eth_devices[slave_pid].data->dev_flags & + RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) + return 1; + return 0; } const uint16_t vlan_tags[] = { diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index 9c1e7039..aad598e5 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -493,6 +493,7 @@ void fwd_config_setup(void); void set_def_fwd_config(void); void reconfig(portid_t new_port_id, unsigned socket_id); int init_fwd_streams(void); +void update_fwd_ports(portid_t new_pid); void port_mtu_set(portid_t port_id, uint16_t mtu); void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos); @@ -599,6 +600,7 @@ enum print_warning { DISABLED_WARN }; int port_id_is_invalid(portid_t port_id, enum print_warning warning); +void print_valid_ports(void); /* * Work-around of a compilation error with ICC on invocations of the diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c index c9e0b66e..24c72963 100644 --- a/app/test/test_cryptodev.c +++ b/app/test/test_cryptodev.c @@ -4621,7 +4621,7 @@ test_multi_session_random_usage(void) * dev_info.sym.max_nb_sessions) + 1, 0); for (i = 0; i < MB_SESSION_NUMBER; i++) { - rte_memcpy(&ut_paramz[i].ut_params, &testsuite_params, + rte_memcpy(&ut_paramz[i].ut_params, &unittest_params, sizeof(struct crypto_unittest_params)); test_AES_CBC_HMAC_SHA512_decrypt_create_session_params( diff --git a/app/test/test_distributor_perf.c b/app/test/test_distributor_perf.c index 7947fe9b..cc621963 100644 --- a/app/test/test_distributor_perf.c +++ b/app/test/test_distributor_perf.c @@ -57,7 +57,7 @@ struct worker_stats worker_stats[RTE_MAX_LCORE]; /* worker thread used for testing the time to do a round-trip of a cache * line between two cores and back again */ -static void +static int flip_bit(volatile uint64_t *arg) { uint64_t old_val = 0; @@ -67,6 +67,7 @@ flip_bit(volatile uint64_t *arg) old_val = *arg; *arg = 0; } + return 0; } /* test case to time the number of cycles to round-trip a cache line between diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c index 91b40664..15248ea2 100644 --- a/app/test/test_eal_flags.c +++ b/app/test/test_eal_flags.c @@ -66,7 +66,7 @@ #define memtest "memtest" #define memtest1 "memtest1" #define memtest2 "memtest2" -#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 10) +#define SOCKET_MEM_STRLEN (RTE_MAX_NUMA_NODES * 20) #define launch_proc(ARGV) process_dup(ARGV, \ sizeof(ARGV)/(sizeof(ARGV[0])), __func__) @@ -1241,10 +1241,11 @@ test_memory_flags(void) #ifdef RTE_EXEC_ENV_BSDAPP int i, num_sockets = 1; #else - int i, num_sockets = get_number_of_sockets(); + int i, num_sockets = RTE_MIN(get_number_of_sockets(), + RTE_MAX_NUMA_NODES); #endif - if (num_sockets <= 0 || num_sockets > RTE_MAX_NUMA_NODES) { + if (num_sockets <= 0) { printf("Error - cannot get number of sockets!\n"); return -1; } diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c index 715b250c..8fb13ce6 100644 --- a/app/test/test_mempool.c +++ b/app/test/test_mempool.c @@ -356,17 +356,17 @@ test_mempool_sp_sc(void) } if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) { printf("Cannot lookup mempool from its name\n"); - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } lcore_next = rte_get_next_lcore(lcore_id, 0, 1); if (lcore_next >= RTE_MAX_LCORE) { - 
rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) { - rte_mempool_free(mp_spsc); - RET_ERR(); + ret = -1; + goto err; } rte_spinlock_init(&scsp_spinlock); memset(scsp_obj_table, 0, sizeof(scsp_obj_table)); @@ -377,7 +377,10 @@ test_mempool_sp_sc(void) if (rte_eal_wait_lcore(lcore_next) < 0) ret = -1; + +err: rte_mempool_free(mp_spsc); + mp_spsc = NULL; return ret; } diff --git a/app/test/test_reorder.c b/app/test/test_reorder.c index 26dab0c2..add45511 100644 --- a/app/test/test_reorder.c +++ b/app/test/test_reorder.c @@ -177,11 +177,11 @@ test_reorder_insert(void) b = rte_reorder_create("test_insert", rte_socket_id(), size); TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer"); - ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs); - TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool"); - - for (i = 0; i < num_bufs; i++) + for (i = 0; i < num_bufs; i++) { + bufs[i] = rte_pktmbuf_alloc(p); + TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n"); bufs[i]->seqn = i; + } /* This should fill up order buffer: * reorder_seq = 0 @@ -196,6 +196,7 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[i] = NULL; } /* early packet - should move mbufs to ready buf and move sequence window @@ -210,6 +211,7 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[4] = NULL; /* early packet from current sequence window - full ready buffer */ bufs[5]->seqn = 2 * size; @@ -220,6 +222,7 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[5] = NULL; /* late packet */ bufs[6]->seqn = 3 * size; @@ -230,11 +233,15 @@ test_reorder_insert(void) ret = -1; goto exit; } + bufs[6] = NULL; ret = 0; exit: - rte_mempool_put_bulk(p, (void *)bufs, num_bufs); rte_reorder_free(b); + for (i = 0; i < num_bufs; i++) { + if (bufs[i] != NULL) + rte_pktmbuf_free(bufs[i]); + } return ret; } @@ -250,6 +257,10 @@ test_reorder_drain(void) int ret = 0; unsigned i, cnt; + /* initialize all robufs to NULL */ + for (i = 0; i < num_bufs; i++) + robufs[i] = NULL; + /* This would create a reorder buffer instance consisting of: * reorder_seq = 0 * ready_buf: RB[size] = {NULL, NULL, NULL, NULL} @@ -258,9 +269,6 @@ test_reorder_drain(void) b = rte_reorder_create("test_drain", rte_socket_id(), size); TEST_ASSERT_NOT_NULL(b, "Failed to create reorder buffer"); - ret = rte_mempool_get_bulk(p, (void *)bufs, num_bufs); - TEST_ASSERT_SUCCESS(ret, "Error getting mbuf from pool"); - /* Check no drained packets if reorder is empty */ cnt = rte_reorder_drain(b, robufs, 1); if (cnt != 0) { @@ -270,8 +278,11 @@ test_reorder_drain(void) goto exit; } - for (i = 0; i < num_bufs; i++) + for (i = 0; i < num_bufs; i++) { + bufs[i] = rte_pktmbuf_alloc(p); + TEST_ASSERT_NOT_NULL(bufs[i], "Packet allocation failed\n"); bufs[i]->seqn = i; + } /* Insert packet with seqn 1: * reorder_seq = 0 @@ -279,6 +290,7 @@ test_reorder_drain(void) * OB[] = {1, NULL, NULL, NULL} */ rte_reorder_insert(b, bufs[1]); + bufs[1] = NULL; cnt = rte_reorder_drain(b, robufs, 1); if (cnt != 1) { @@ -287,6 +299,8 @@ test_reorder_drain(void) ret = -1; goto exit; } + if (robufs[0] != NULL) + rte_pktmbuf_free(robufs[i]); /* Insert more packets * RB[] = {NULL, NULL, NULL, NULL} @@ -294,18 +308,22 @@ test_reorder_drain(void) */ rte_reorder_insert(b, bufs[2]); rte_reorder_insert(b, bufs[3]); + bufs[2] = NULL; + bufs[3] = NULL; /* Insert more packets * RB[] = {NULL, NULL, NULL, NULL} * OB[] = {NULL, 2, 3, 4} */ rte_reorder_insert(b, bufs[4]); + bufs[4] = NULL; /* Insert more packets * RB[] = {2, 3, 4, 
NULL} * OB[] = {NULL, NULL, 7, NULL} */ rte_reorder_insert(b, bufs[7]); + bufs[7] = NULL; /* drained expected packets */ cnt = rte_reorder_drain(b, robufs, 4); @@ -315,6 +333,10 @@ test_reorder_drain(void) ret = -1; goto exit; } + for (i = 0; i < 3; i++) { + if (robufs[i] != NULL) + rte_pktmbuf_free(robufs[i]); + } /* * RB[] = {NULL, NULL, NULL, NULL} @@ -329,8 +351,13 @@ test_reorder_drain(void) } ret = 0; exit: - rte_mempool_put_bulk(p, (void *)bufs, num_bufs); rte_reorder_free(b); + for (i = 0; i < num_bufs; i++) { + if (bufs[i] != NULL) + rte_pktmbuf_free(bufs[i]); + if (robufs[i] != NULL) + rte_pktmbuf_free(robufs[i]); + } return ret; } diff --git a/app/test/test_table_pipeline.c b/app/test/test_table_pipeline.c index 36bfeda3..017d283f 100644 --- a/app/test/test_table_pipeline.c +++ b/app/test/test_table_pipeline.c @@ -98,9 +98,9 @@ rte_pipeline_table_action_handler_hit table_action_stub_hit(struct rte_pipeline *p, struct rte_mbuf **pkts, uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg); -rte_pipeline_table_action_handler_miss +static int table_action_stub_miss(struct rte_pipeline *p, struct rte_mbuf **pkts, - uint64_t pkts_mask, struct rte_pipeline_table_entry **entry, void *arg); + uint64_t pkts_mask, struct rte_pipeline_table_entry *entry, void *arg); rte_pipeline_table_action_handler_hit table_action_0x00(__attribute__((unused)) struct rte_pipeline *p, @@ -130,11 +130,11 @@ table_action_stub_hit(__attribute__((unused)) struct rte_pipeline *p, return 0; } -rte_pipeline_table_action_handler_miss +static int table_action_stub_miss(struct rte_pipeline *p, __attribute__((unused)) struct rte_mbuf **pkts, uint64_t pkts_mask, - __attribute__((unused)) struct rte_pipeline_table_entry **entry, + __attribute__((unused)) struct rte_pipeline_table_entry *entry, __attribute__((unused)) void *arg) { printf("STUB Table Action Miss - setting mask to 0x%"PRIx64"\n", @@ -546,8 +546,7 @@ test_table_pipeline(void) /* TEST - one packet per port */ action_handler_hit = NULL; - action_handler_miss = - (rte_pipeline_table_action_handler_miss) table_action_stub_miss; + action_handler_miss = table_action_stub_miss; table_entry_default_action = RTE_PIPELINE_ACTION_PORT; override_miss_mask = 0x01; /* one packet per port */ setup_pipeline(e_TEST_STUB); @@ -582,8 +581,7 @@ test_table_pipeline(void) printf("TEST - two tables, hitmask override to 0x01\n"); connect_miss_action_to_table = 1; - action_handler_miss = - (rte_pipeline_table_action_handler_miss)table_action_stub_miss; + action_handler_miss = table_action_stub_miss; override_miss_mask = 0x01; setup_pipeline(e_TEST_STUB); if (test_pipeline_single_filter(e_TEST_STUB, 2) < 0) diff --git a/config/common_base b/config/common_base index 2d4a47fb..802fb5e3 100644 --- a/config/common_base +++ b/config/common_base @@ -96,6 +96,7 @@ CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n CONFIG_RTE_EAL_IGB_UIO=n CONFIG_RTE_EAL_VFIO=n CONFIG_RTE_MALLOC_DEBUG=n +CONFIG_RTE_USE_LIBBSD=n # Default driver path (or "" to disable) CONFIG_RTE_EAL_PMD_PATH="" diff --git a/devtools/cocci/strlcpy.cocci b/devtools/cocci/strlcpy.cocci new file mode 100644 index 00000000..335e2712 --- /dev/null +++ b/devtools/cocci/strlcpy.cocci @@ -0,0 +1,8 @@ +@use_strlcpy@ +identifier src, dst; +expression size; +@@ +( +- snprintf(dst, size, "%s", src) ++ strlcpy(dst, src, size) +) diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst b/doc/guides/prog_guide/env_abstraction_layer.rst index 10a10a88..110e3a28 100644 --- a/doc/guides/prog_guide/env_abstraction_layer.rst +++ 
b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -359,7 +359,7 @@ Known Issues
 
 + rte_timer
 
-  Running ``rte_timer_manager()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
+  Running ``rte_timer_manage()`` on a non-EAL pthread is not allowed. However, resetting/stopping the timer from a non-EAL pthread is allowed.
 
 + rte_log
 
diff --git a/doc/guides/rel_notes/release_16_11.rst b/doc/guides/rel_notes/release_16_11.rst
index e398e291..dc52bf43 100644
--- a/doc/guides/rel_notes/release_16_11.rst
+++ b/doc/guides/rel_notes/release_16_11.rst
@@ -1171,3 +1171,119 @@ Fixes in 16.11 LTS Release
 * vhost: handle virtually non-contiguous buffers in Rx-mrg (fixes CVE-2018-1059)
 * vhost: handle virtually non-contiguous buffers in Tx (fixes CVE-2018-1059)
 * vhost-user: fix deadlock in case of NUMA realloc
+
+16.11.7
+~~~~~~~
+
+* app/crypto-perf: fix parameters copy
+* app/testpmd: fix burst stats reporting
+* app/testpmd: fix command token
+* app/testpmd: fix forward ports Rx flush
+* app/testpmd: fix forward ports update
+* app/testpmd: fix slave port detection
+* app/testpmd: fix synchronic port hotplug
+* app/testpmd: fix valid ports prints
+* bus/pci: fix size of driver name buffer
+* crypto/zuc: do not set default op status
+* crypto/zuc: remove unnecessary check
+* doc: fix a typo in the EAL guide
+* drivers/net: fix icc deprecated parameter warning
+* drivers/net: fix link autoneg value for virtual PMDs
+* eal: declare trace buffer at top of own block
+* eal: explicit cast in rwlock functions
+* eal: explicit cast of builtin for bsf32
+* eal: explicit cast of core id when getting index
+* eal: fix casts in random functions
+* eal: fix typo in doc of pointer offset macro
+* eal/ppc: remove braces in SMP memory barrier macro
+* eal: remove unused path pattern
+* eal: support strlcpy function
+* ethdev: explicit cast of buffered Tx number
+* ethdev: explicit cast of queue count return
+* ethdev: fix queue start
+* ethdev: fix string length in name comparison
+* ethdev: fix type and scope of variables in Rx burst
+* ethdev: improve doc for name by port ID API
+* examples/exception_path: limit core count to 64
+* examples/performance-thread: fix return type of threads
+* hash: explicit casts for truncation in CRC32c
+* hash: fix comment for lookup
+* hash: move stack declaration at top of CRC32c function
+* ip_frag: fix double free of chained mbufs
+* ip_frag: fix some debug logs
+* kni: fix build on RHEL 7.5
+* kvargs: fix syntax in comments
+* mbuf: avoid integer promotion in prepend/adj/chain
+* mbuf: explicit cast of headroom on reset
+* mbuf: explicit cast of size on detach
+* mbuf: explicit casts of reference counter
+* mbuf: fix reference counter integer promotion
+* mbuf: fix Tx checksum offload API doc
+* mbuf: fix type of private size in detach
+* mempool: fix leak when no objects are populated
+* mempool: fix virtual address population
+* memzone: fix size on reserving biggest memzone
+* net/bnx2x: do not cast function pointers as a policy
+* net/bnx2x: fix for PCI FLR after ungraceful exit
+* net/bnx2x: fix KR2 device check
+* net/bnx2x: fix memzone name overrun
+* net/bnxt: avoid freeing memzone multiple times
+* net/bnxt: fix endianness of flag
+* net/bnxt: fix mbuf data offset initialization
+* net/bnxt: fix Rx checksum flags
+* net/bnxt: fix Rx checksum flags for tunnel frames
+* net/bnxt: fix Rx drop setting
+* net/bnxt: fix Rx mbuf and agg ring leak in dev stop
+* net/bonding: clear started state if start fails
+* net/bonding: export mode 4 slave info routine
+* net/bonding: fix setting VLAN ID on slave ports
+* net/enic: allocate stats DMA buffer upfront during probe
+* net/enic: fix crash on MTU update with non-setup queues
+* net: explicit cast in L4 checksum
+* net: explicit cast of IP checksum to 16-bit
+* net: explicit cast of multicast bit clearing
+* net: explicit cast of protocol in IPv6 checksum
+* net/i40e: fix failing to disable FDIR Tx queue
+* net/i40e: fix intr callback unregister by adding retry
+* net/i40e: fix link status update
+* net/i40e: fix link update no wait
+* net/i40e: fix shifts of signed values
+* net/ixgbe: fix DCB configuration
+* net/ixgbe: fix intr callback unregister by adding retry
+* net/ixgbe: fix too many interrupts
+* net/mlx5: fix ARM build
+* net/mlx5: fix double free on error handling
+* net/mlx5: fix resource leak in case of error
+* net: move stack variable at top of VLAN strip function
+* net/nfp: fix assigning port id in mbuf
+* net/nfp: fix barrier location
+* net/nfp: fix mbufs releasing when stop or close
+* net/nfp: fix memcpy out of source range
+* net/qede: fix alloc from socket 0
+* net/qede: fix strncpy
+* net/qede: fix unicast filter routine return code
+* net/qede: replace strncpy by strlcpy
+* net/szedata2: fix format string for PCI address
+* net/szedata2: fix total stats
+* net/thunderx: fix MTU configuration for jumbo packets
+* net/vhost: initialise device as inactive
+* net/virtio-user: fix hugepage files enumeration
+* net/vmxnet3: keep link state consistent
+* net/vmxnet3: set the queue shared buffer at start
+* Revert "vhost: fix device cleanup at stop"
+* spinlock/x86: move stack declaration before code
+* test/distributor: fix return type of thread function
+* test: fix memory flags test for low NUMA nodes number
+* test/mempool: fix autotest retry
+* test/pipeline: fix return type of stub miss
+* test/pipeline: fix type of table entry parameter
+* test/reorder: fix freeing mbuf twice
+* vhost: check cmsg not null
+* vhost: fix compilation issue when vhost debug enabled
+* vhost: fix dead lock on closing in server mode
+* vhost: fix device cleanup at stop
+* vhost: fix log macro name conflict
+* vhost: fix offset while mmaping log base address
+* vhost: fix realloc failure
+* vhost: fix typo in comment
+* vhost: improve dirty pages logging performance
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 7057fcac..47eb13a1 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -371,12 +371,8 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	for (i = 0; i < nb_ops; i++) {
 		curr_c_op = ops[i];
 
-		/* Set status as enqueued (not processed yet) by default.
*/ - curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - curr_sess = zuc_get_session(qp, curr_c_op); - if (unlikely(curr_sess == NULL || - curr_sess->op == ZUC_OP_NOT_SUPPORTED)) { + if (unlikely(curr_sess == NULL)) { curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; break; diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c index 6d73f127..3628e6e9 100644 --- a/drivers/net/af_packet/rte_eth_af_packet.c +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -121,7 +121,7 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG + .link_autoneg = ETH_LINK_FIXED, }; static uint16_t diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile index e971fb66..dc1c7bd2 100644 --- a/drivers/net/bnx2x/Makefile +++ b/drivers/net/bnx2x/Makefile @@ -15,7 +15,7 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map LIBABIVER := 1 ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -CFLAGS += -wd188 #188: enumerated type mixed with another type +CFLAGS += -diag-disable 188 #188: enumerated type mixed with another type endif # diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c index 0d16a737..c2842e3a 100644 --- a/drivers/net/bnx2x/bnx2x.c +++ b/drivers/net/bnx2x/bnx2x.c @@ -170,10 +170,10 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma, dma->sc = sc; if (IS_PF(sc)) - sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg, rte_get_timer_cycles()); else - sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, + snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg, rte_get_timer_cycles()); /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */ @@ -8289,16 +8289,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc) REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0); REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0); } - -/* - * Enable internal target-read (in case we are probed after PF - * FLR). Must be done prior to any BAR read access. Only for - * 57712 and up - */ - if (!CHIP_IS_E1x(sc)) { - REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, - 1); - } } /* get the nvram size */ @@ -9675,7 +9665,17 @@ int bnx2x_attach(struct bnx2x_softc *sc) bnx2x_init_rte(sc); if (IS_PF(sc)) { -/* get device info and set params */ + /* Enable internal target-read (in case we are probed after PF + * FLR). Must be done prior to any BAR read access. 
Only for + * 57712 and up + */ + if (!CHIP_IS_E1x(sc)) { + REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, + 1); + DELAY(200000); + } + + /* get device info and set params */ if (bnx2x_get_device_info(sc) != 0) { PMD_DRV_LOG(NOTICE, "getting device info"); return -ENXIO; @@ -9684,7 +9684,7 @@ int bnx2x_attach(struct bnx2x_softc *sc) /* get phy settings from shmem and 'and' against admin settings */ bnx2x_get_phy_info(sc); } else { -/* Left mac of VF unfilled, PF should set it for VF */ + /* Left mac of VF unfilled, PF should set it for VF */ memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN); } diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c index 53293962..89747104 100644 --- a/drivers/net/bnx2x/elink.c +++ b/drivers/net/bnx2x/elink.c @@ -4143,9 +4143,9 @@ static void elink_sfp_e3_set_transmitter(struct elink_params *params, elink_set_cfg_pin(sc, cfg_pin + 3, tx_en ^ 1); } -static void elink_warpcore_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_warpcore_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint32_t serdes_net_if; @@ -4222,7 +4222,7 @@ static void elink_warpcore_config_init(struct elink_phy *phy, case PORT_HW_CFG_NET_SERDES_IF_DXGXS: if (vars->line_speed != ELINK_SPEED_20000) { PMD_DRV_LOG(DEBUG, "Speed not supported yet"); - return; + return 0; } PMD_DRV_LOG(DEBUG, "Setting 20G DXGXS"); elink_warpcore_set_20G_DXGXS(sc, phy, lane); @@ -4242,13 +4242,15 @@ static void elink_warpcore_config_init(struct elink_phy *phy, PMD_DRV_LOG(DEBUG, "Unsupported Serdes Net Interface 0x%x", serdes_net_if); - return; + return 0; } } /* Take lane out of reset after configuration is finished */ elink_warpcore_reset_lane(sc, phy, 0); PMD_DRV_LOG(DEBUG, "Exit config init"); + + return 0; } static void elink_warpcore_link_reset(struct elink_phy *phy, @@ -5226,9 +5228,9 @@ static elink_status_t elink_get_link_speed_duplex(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_link_settings_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_link_settings_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; @@ -5299,9 +5301,9 @@ static elink_status_t elink_link_settings_status(struct elink_phy *phy, return rc; } -static elink_status_t elink_warpcore_read_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_warpcore_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t lane; @@ -5520,9 +5522,9 @@ static void elink_set_preemphasis(struct elink_phy *phy, } } -static void elink_xgxs_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_xgxs_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { uint8_t enable_cl73 = (ELINK_SINGLE_MEDIA_DIRECT(params) || (params->loopback_mode == ELINK_LOOPBACK_XGXS)); @@ -5567,6 +5569,8 @@ static void elink_xgxs_config_init(struct elink_phy *phy, elink_initialize_sgmii_process(phy, params, vars); } + + return 0; } static elink_status_t elink_prepare_xgxs(struct elink_phy *phy, @@ -5751,8 +5755,8 @@ static void elink_link_int_ack(struct elink_params *params, } } -static elink_status_t 
elink_format_ver(uint32_t num, uint8_t * str, - uint16_t * len) +static uint8_t elink_format_ver(uint32_t num, uint8_t * str, + uint16_t * len) { uint8_t *str_ptr = str; uint32_t mask = 0xf0000000; @@ -5790,8 +5794,8 @@ static elink_status_t elink_format_ver(uint32_t num, uint8_t * str, return ELINK_STATUS_OK; } -static elink_status_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, - uint8_t * str, uint16_t * len) +static uint8_t elink_null_format_ver(__rte_unused uint32_t spirom_ver, + uint8_t * str, uint16_t * len) { str[0] = '\0'; (*len)--; @@ -6801,9 +6805,9 @@ static void elink_8073_specific_func(struct elink_phy *phy, } } -static elink_status_t elink_8073_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_8073_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint16_t val = 0, tmp1; @@ -7096,9 +7100,9 @@ static void elink_8073_link_reset(__rte_unused struct elink_phy *phy, /******************************************************************/ /* BNX2X8705 PHY SECTION */ /******************************************************************/ -static elink_status_t elink_8705_config_init(struct elink_phy *phy, - struct elink_params *params, - __rte_unused struct elink_vars +static uint8_t elink_8705_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; @@ -8402,9 +8406,9 @@ static uint8_t elink_8706_config_init(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_8706_read_status(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_8706_read_status(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { return elink_8706_8726_read_status(phy, params, vars); } @@ -8476,9 +8480,9 @@ static uint8_t elink_8726_read_status(struct elink_phy *phy, return link_up; } -static elink_status_t elink_8726_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_8726_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; PMD_DRV_LOG(DEBUG, "Initializing BNX2X8726"); @@ -8683,9 +8687,9 @@ static void elink_8727_config_speed(struct elink_phy *phy, } } -static elink_status_t elink_8727_config_init(struct elink_phy *phy, - struct elink_params *params, - __rte_unused struct elink_vars +static uint8_t elink_8727_config_init(struct elink_phy *phy, + struct elink_params *params, + __rte_unused struct elink_vars *vars) { uint32_t tx_en_mode; @@ -9290,7 +9294,7 @@ static elink_status_t elink_848xx_cmn_config_init(struct elink_phy *phy, return ELINK_STATUS_OK; } -static elink_status_t elink_8481_config_init(struct elink_phy *phy, +static uint8_t elink_8481_config_init(struct elink_phy *phy, struct elink_params *params, struct elink_vars *vars) { @@ -9441,8 +9445,8 @@ static uint8_t elink_84833_get_reset_gpios(struct bnx2x_softc *sc, return reset_gpios; } -static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy, - struct elink_params *params) +static void elink_84833_hw_reset_phy(struct elink_phy *phy, + struct elink_params *params) { struct bnx2x_softc *sc = params->sc; uint8_t reset_gpios; @@ -9470,8 +9474,6 @@ static elink_status_t elink_84833_hw_reset_phy(struct elink_phy *phy, MISC_REGISTERS_GPIO_OUTPUT_LOW); DELAY(10); 
PMD_DRV_LOG(DEBUG, "84833 hw reset on pin values 0x%x", reset_gpios); - - return ELINK_STATUS_OK; } static elink_status_t elink_8483x_disable_eee(struct elink_phy *phy, @@ -9512,9 +9514,9 @@ static elink_status_t elink_8483x_enable_eee(struct elink_phy *phy, } #define PHY84833_CONSTANT_LATENCY 1193 -static elink_status_t elink_848x3_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_848x3_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t port, initialize = 1; @@ -9818,7 +9820,7 @@ static uint8_t elink_848xx_read_status(struct elink_phy *phy, return link_up; } -static elink_status_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str, +static uint8_t elink_848xx_format_ver(uint32_t raw_ver, uint8_t * str, uint16_t * len) { elink_status_t status = ELINK_STATUS_OK; @@ -10145,9 +10147,9 @@ static void elink_54618se_specific_func(struct elink_phy *phy, } } -static elink_status_t elink_54618se_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_54618se_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { struct bnx2x_softc *sc = params->sc; uint8_t port; @@ -10541,9 +10543,9 @@ static void elink_7101_config_loopback(struct elink_phy *phy, MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100); } -static elink_status_t elink_7101_config_init(struct elink_phy *phy, - struct elink_params *params, - struct elink_vars *vars) +static uint8_t elink_7101_config_init(struct elink_phy *phy, + struct elink_params *params, + struct elink_vars *vars) { uint16_t fw_ver1, fw_ver2, val; struct bnx2x_softc *sc = params->sc; @@ -10613,8 +10615,8 @@ static uint8_t elink_7101_read_status(struct elink_phy *phy, return link_up; } -static elink_status_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str, - uint16_t * len) +static uint8_t elink_7101_format_ver(uint32_t spirom_ver, uint8_t * str, + uint16_t * len) { if (*len < 5) return ELINK_STATUS_ERROR; @@ -10679,14 +10681,14 @@ static const struct elink_phy phy_null = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) NULL, - .read_status = (read_status_t) NULL, - .link_reset = (link_reset_t) NULL, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = NULL, + .read_status = NULL, + .link_reset = NULL, + .config_loopback = NULL, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_serdes = { @@ -10713,14 +10715,14 @@ static const struct elink_phy phy_serdes = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_xgxs_config_init, - .read_status = (read_status_t) elink_link_settings_status, - .link_reset = (link_reset_t) elink_int_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_xgxs_config_init, + .read_status = elink_link_settings_status, + .link_reset = elink_int_link_reset, + .config_loopback = NULL, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func 
= NULL }; static const struct elink_phy phy_xgxs = { @@ -10748,14 +10750,14 @@ static const struct elink_phy phy_xgxs = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_xgxs_config_init, - .read_status = (read_status_t) elink_link_settings_status, - .link_reset = (link_reset_t) elink_int_link_reset, - .config_loopback = (config_loopback_t) elink_set_xgxs_loopback, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) elink_xgxs_specific_func + .config_init = elink_xgxs_config_init, + .read_status = elink_link_settings_status, + .link_reset = elink_int_link_reset, + .config_loopback = elink_set_xgxs_loopback, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = elink_xgxs_specific_func }; static const struct elink_phy phy_warpcore = { @@ -10784,14 +10786,14 @@ static const struct elink_phy phy_warpcore = { .speed_cap_mask = 0, /* req_duplex = */ 0, /* rsrv = */ 0, - .config_init = (config_init_t) elink_warpcore_config_init, - .read_status = (read_status_t) elink_warpcore_read_status, - .link_reset = (link_reset_t) elink_warpcore_link_reset, - .config_loopback = (config_loopback_t) elink_set_warpcore_loopback, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) elink_warpcore_hw_reset, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_warpcore_config_init, + .read_status = elink_warpcore_read_status, + .link_reset = elink_warpcore_link_reset, + .config_loopback = elink_set_warpcore_loopback, + .format_fw_ver = NULL, + .hw_reset = elink_warpcore_hw_reset, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_7101 = { @@ -10813,14 +10815,14 @@ static const struct elink_phy phy_7101 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_7101_config_init, - .read_status = (read_status_t) elink_7101_read_status, - .link_reset = (link_reset_t) elink_common_ext_link_reset, - .config_loopback = (config_loopback_t) elink_7101_config_loopback, - .format_fw_ver = (format_fw_ver_t) elink_7101_format_ver, - .hw_reset = (hw_reset_t) elink_7101_hw_reset, - .set_link_led = (set_link_led_t) elink_7101_set_link_led, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_7101_config_init, + .read_status = elink_7101_read_status, + .link_reset = elink_common_ext_link_reset, + .config_loopback = elink_7101_config_loopback, + .format_fw_ver = elink_7101_format_ver, + .hw_reset = elink_7101_hw_reset, + .set_link_led = elink_7101_set_link_led, + .phy_specific_func = NULL }; static const struct elink_phy phy_8073 = { @@ -10844,14 +10846,14 @@ static const struct elink_phy phy_8073 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8073_config_init, - .read_status = (read_status_t) elink_8073_read_status, - .link_reset = (link_reset_t) elink_8073_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) elink_8073_specific_func + .config_init = elink_8073_config_init, + .read_status = elink_8073_read_status, + .link_reset = elink_8073_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, 
+ .phy_specific_func = elink_8073_specific_func }; static const struct elink_phy phy_8705 = { @@ -10872,14 +10874,14 @@ static const struct elink_phy phy_8705 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8705_config_init, - .read_status = (read_status_t) elink_8705_read_status, - .link_reset = (link_reset_t) elink_common_ext_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_null_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8705_config_init, + .read_status = elink_8705_read_status, + .link_reset = elink_common_ext_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_null_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_8706 = { @@ -10901,14 +10903,14 @@ static const struct elink_phy phy_8706 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8706_config_init, - .read_status = (read_status_t) elink_8706_read_status, - .link_reset = (link_reset_t) elink_common_ext_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8706_config_init, + .read_status = elink_8706_read_status, + .link_reset = elink_common_ext_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_8726 = { @@ -10931,14 +10933,14 @@ static const struct elink_phy phy_8726 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8726_config_init, - .read_status = (read_status_t) elink_8726_read_status, - .link_reset = (link_reset_t) elink_8726_link_reset, - .config_loopback = (config_loopback_t) elink_8726_config_loopback, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) NULL, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8726_config_init, + .read_status = elink_8726_read_status, + .link_reset = elink_8726_link_reset, + .config_loopback = elink_8726_config_loopback, + .format_fw_ver = elink_format_ver, + .hw_reset = NULL, + .set_link_led = NULL, + .phy_specific_func = NULL }; static const struct elink_phy phy_8727 = { @@ -10960,14 +10962,14 @@ static const struct elink_phy phy_8727 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8727_config_init, - .read_status = (read_status_t) elink_8727_read_status, - .link_reset = (link_reset_t) elink_8727_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_format_ver, - .hw_reset = (hw_reset_t) elink_8727_hw_reset, - .set_link_led = (set_link_led_t) elink_8727_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_8727_specific_func + .config_init = elink_8727_config_init, + .read_status = elink_8727_read_status, + .link_reset = elink_8727_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_format_ver, + .hw_reset = elink_8727_hw_reset, + .set_link_led = elink_8727_set_link_led, + .phy_specific_func = elink_8727_specific_func }; static const struct elink_phy phy_8481 = { @@ -10995,14 
+10997,14 @@ static const struct elink_phy phy_8481 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_8481_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_8481_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) elink_8481_hw_reset, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) NULL + .config_init = elink_8481_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_8481_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = elink_8481_hw_reset, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = NULL }; static const struct elink_phy phy_84823 = { @@ -11030,14 +11032,14 @@ static const struct elink_phy phy_84823 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_848x3_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_848x3_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func + .config_init = elink_848x3_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = NULL, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = elink_848xx_specific_func }; static const struct elink_phy phy_84833 = { @@ -11064,14 +11066,14 @@ static const struct elink_phy phy_84833 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_848x3_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_848x3_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func + .config_init = elink_848x3_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = elink_84833_hw_reset_phy, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = elink_848xx_specific_func }; static const struct elink_phy phy_84834 = { @@ -11097,14 +11099,14 @@ static const struct elink_phy phy_84834 = { .speed_cap_mask = 0, .req_duplex = 0, .rsrv = 0, - .config_init = (config_init_t) elink_848x3_config_init, - .read_status = (read_status_t) elink_848xx_read_status, - .link_reset = (link_reset_t) elink_848x3_link_reset, - .config_loopback = (config_loopback_t) NULL, - .format_fw_ver = (format_fw_ver_t) elink_848xx_format_ver, - .hw_reset = (hw_reset_t) elink_84833_hw_reset_phy, - .set_link_led = (set_link_led_t) elink_848xx_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_848xx_specific_func + .config_init = elink_848x3_config_init, + .read_status = elink_848xx_read_status, + .link_reset = elink_848x3_link_reset, + .config_loopback = NULL, + .format_fw_ver = elink_848xx_format_ver, + .hw_reset = 
elink_84833_hw_reset_phy, + .set_link_led = elink_848xx_set_link_led, + .phy_specific_func = elink_848xx_specific_func }; static const struct elink_phy phy_54618se = { @@ -11130,14 +11132,14 @@ static const struct elink_phy phy_54618se = { .speed_cap_mask = 0, /* req_duplex = */ 0, /* rsrv = */ 0, - .config_init = (config_init_t) elink_54618se_config_init, - .read_status = (read_status_t) elink_54618se_read_status, - .link_reset = (link_reset_t) elink_54618se_link_reset, - .config_loopback = (config_loopback_t) elink_54618se_config_loopback, - .format_fw_ver = (format_fw_ver_t) NULL, - .hw_reset = (hw_reset_t) NULL, - .set_link_led = (set_link_led_t) elink_5461x_set_link_led, - .phy_specific_func = (phy_specific_func_t) elink_54618se_specific_func + .config_init = elink_54618se_config_init, + .read_status = elink_54618se_read_status, + .link_reset = elink_54618se_link_reset, + .config_loopback = elink_54618se_config_loopback, + .format_fw_ver = NULL, + .hw_reset = NULL, + .set_link_led = elink_5461x_set_link_led, + .phy_specific_func = elink_54618se_specific_func }; /*****************************************************************/ @@ -12916,7 +12918,7 @@ static void elink_check_kr2_wa(struct elink_params *params, */ not_kr2_device = (((base_page & 0x8000) == 0) || (((base_page & 0x8000) && - ((next_page & 0xe0) == 0x2)))); + ((next_page & 0xe0) == 0x20)))); /* In case KR2 is already disabled, check if we need to re-enable it */ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 359a95d4..3a0b1ce4 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -379,7 +379,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, .wthresh = 0, }, .rx_free_thresh = 32, - .rx_drop_en = 0, + /* If no descriptors available, pkts are dropped by default */ + .rx_drop_en = 1, }; dev_info->default_txconf = (struct rte_eth_txconf) { @@ -552,6 +553,8 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) bnxt_set_hwrm_link_config(bp, false); bnxt_disable_int(bp); bnxt_free_int(bp); + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); bnxt_shutdown_nic(bp); bp->dev_stopped = 1; } @@ -563,8 +566,6 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) if (bp->dev_stopped == 0) bnxt_dev_stop_op(eth_dev); - bnxt_free_tx_mbufs(bp); - bnxt_free_rx_mbufs(bp); bnxt_free_mem(bp); if (eth_dev->data->mac_addrs != NULL) { rte_free(eth_dev->data->mac_addrs); diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index 8ff4c15d..d790b991 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -853,7 +853,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) HWRM_PREP(req, VNIC_ALLOC, -1, resp); if (vnic->func_default) - req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; + req.flags = + rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); HWRM_CHECK_RESULT; diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index 2bceb4db..6091a884 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -54,7 +54,7 @@ void bnxt_free_ring(struct bnxt_ring *ring) memset((char *)*ring->vmem, 0, ring->vmem_size); *ring->vmem = NULL; } - rte_memzone_free((const struct rte_memzone *)ring->mem_zone); + ring->mem_zone = NULL; } /* @@ -91,12 +91,14 @@ int bnxt_init_ring_grps(struct bnxt *bp) * rx bd ring - Only non-zero length if rx_ring_info is not 
NULL */ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, - struct bnxt_tx_ring_info *tx_ring_info, - struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix) { struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct; + struct bnxt_rx_ring_info *rx_ring_info = rxq ? rxq->rx_ring : NULL; + struct bnxt_tx_ring_info *tx_ring_info = txq ? txq->tx_ring : NULL; struct bnxt_ring *tx_ring; struct bnxt_ring *rx_ring; struct rte_pci_device *pdev = bp->pdev; @@ -152,6 +154,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, memset(mz->addr, 0, mz->len); if (tx_ring_info) { + txq->mz = mz; tx_ring = tx_ring_info->tx_ring_struct; tx_ring->bd = ((char *)mz->addr + tx_ring_start); @@ -171,6 +174,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, } if (rx_ring_info) { + rxq->mz = mz; rx_ring = rx_ring_info->rx_ring_struct; rx_ring->bd = ((char *)mz->addr + rx_ring_start); diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h index 22a56eb1..51fbb5cb 100644 --- a/drivers/net/bnxt/bnxt_ring.h +++ b/drivers/net/bnxt/bnxt_ring.h @@ -95,8 +95,8 @@ struct bnxt_cp_ring_info; void bnxt_free_ring(struct bnxt_ring *ring); int bnxt_init_ring_grps(struct bnxt *bp); int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, - struct bnxt_tx_ring_info *tx_ring_info, - struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_tx_queue *txq, + struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix); int bnxt_alloc_hwrm_rings(struct bnxt *bp); diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index cddf17d5..b7b170be 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -221,7 +221,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused) if (rxq) { sw_ring = rxq->rx_ring->rx_buf_ring; if (sw_ring) { - for (i = 0; i < rxq->nb_rx_desc; i++) { + for (i = 0; + i < rxq->rx_ring->rx_ring_struct->ring_size; i++) { if (sw_ring[i].mbuf) { rte_pktmbuf_free_seg(sw_ring[i].mbuf); sw_ring[i].mbuf = NULL; @@ -256,6 +257,8 @@ void bnxt_rx_queue_release_op(void *rx_queue) bnxt_free_ring(rxq->cp_ring->cp_ring_struct); bnxt_free_rxq_stats(rxq); + rte_memzone_free(rxq->mz); + rxq->mz = NULL; rte_free(rxq); } @@ -306,7 +309,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, eth_dev->data->rx_queues[queue_idx] = rxq; /* Allocate RX ring hardware descriptors */ - if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring, + if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring, "rxr")) { RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!"); bnxt_rx_queue_release_op(rxq); diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h index 95543298..2f8eb896 100644 --- a/drivers/net/bnxt/bnxt_rxq.h +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -58,6 +58,7 @@ struct bnxt_rx_queue { uint32_t rx_buf_use_size; /* useable size */ struct bnxt_rx_ring_info *rx_ring; struct bnxt_cp_ring_info *cp_ring; + const struct rte_memzone *mz; }; void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq); diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index 5698f02f..ccf8196a 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -71,6 +71,7 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, return -ENOMEM; rx_buf->mbuf = data; + data->data_off = RTE_PKTMBUF_HEADROOM; rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_dma_addr_default(data)); @@ -152,11 
+153,15 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, if (likely(RX_CMP_IP_CS_OK(rxcmp1))) mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + else if (likely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; else mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; if (likely(RX_CMP_L4_CS_OK(rxcmp1))) mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else if (likely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; else mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index 111a2136..ec2832b5 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -37,22 +37,36 @@ #define B_RX_DB(db, prod) \ (*(uint32_t *)db = (DB_KEY_RX | prod)) -#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) +#define RX_CMP_L4_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC) -#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR) +#define RX_CMP_L4_CS_ERR_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR | \ + RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR) #define RX_CMP_L4_CS_OK(rxcmp1) \ (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \ !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS)) -#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR) +#define RX_CMP_L4_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) -#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC) +#define RX_CMP_IP_CS_ERR_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR | \ + RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR) + +#define RX_CMP_IP_CS_BITS \ + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC | \ + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC) #define RX_CMP_IP_CS_OK(rxcmp1) \ (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \ !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS)) +#define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ + !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) + struct bnxt_sw_rx_bd { struct rte_mbuf *mbuf; /* data associated with RX descriptor */ }; diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c index 99dddddf..0d2cb499 100644 --- a/drivers/net/bnxt/bnxt_txq.c +++ b/drivers/net/bnxt/bnxt_txq.c @@ -93,6 +93,8 @@ void bnxt_tx_queue_release_op(void *tx_queue) bnxt_free_ring(txq->cp_ring->cp_ring_struct); bnxt_free_txq_stats(txq); + rte_memzone_free(txq->mz); + txq->mz = NULL; rte_free(txq); } @@ -140,7 +142,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, txq->port_id = eth_dev->data->port_id; /* Allocate TX ring hardware descriptors */ - if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring, + if (bnxt_alloc_rings(bp, queue_idx, txq, NULL, txq->cp_ring, "txr")) { RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!"); bnxt_tx_queue_release_op(txq); diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h index 16f3a0bd..8a23f003 100644 --- a/drivers/net/bnxt/bnxt_txq.h +++ b/drivers/net/bnxt/bnxt_txq.h @@ -61,6 +61,7 @@ struct bnxt_tx_queue { unsigned int cp_nr_rings; struct bnxt_cp_ring_info *cp_ring; + const struct rte_memzone *mz; }; void bnxt_free_txq_stats(struct bnxt_tx_queue *txq); diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c index 4b6f1471..7bb2fe1e 100644 --- a/drivers/net/bonding/rte_eth_bond_api.c +++ b/drivers/net/bonding/rte_eth_bond_api.c @@ -361,9 +361,12 @@ slave_vlan_filter_set(uint8_t bonded_port_id, uint8_t slave_port_id) for (i = 0, mask = 1; i < RTE_BITMAP_SLAB_BIT_SIZE; i ++, mask 
<<= 1) { - if (unlikely(slab & mask)) + if (unlikely(slab & mask)) { + uint16_t vlan_id = pos + i; + res = rte_eth_dev_vlan_filter(slave_port_id, - (uint16_t)pos, 1); + vlan_id, 1); + } } found = rte_bitmap_scan(internals->vlan_filter_bmp, &pos, &slab); diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index c1ec3aa4..6f8931ef 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -1525,7 +1525,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slave_count == 0) { RTE_BOND_LOG(ERR, "Cannot start port since there are no slave devices"); - return -1; + goto out_err; } if (internals->user_defined_mac == 0) { @@ -1536,18 +1536,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) new_mac_addr = &internals->slaves[i].persisted_mac_addr; if (new_mac_addr == NULL) - return -1; + goto out_err; if (mac_address_set(eth_dev, new_mac_addr) != 0) { RTE_BOND_LOG(ERR, "bonded port (%d) failed to update MAC address", eth_dev->data->port_id); - return -1; + goto out_err; } } /* Update all slave devices MACs*/ if (mac_address_slaves_update(eth_dev) != 0) - return -1; + goto out_err; /* If bonded device is configure in promiscuous mode then re-apply config */ if (internals->promiscuous_en) @@ -1560,7 +1560,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) RTE_BOND_LOG(ERR, "bonded port (%d) failed to reconfigure slave device (%d)", eth_dev->data->port_id, internals->slaves[i].port_id); - return -1; + goto out_err; } /* We will need to poll for link status if any slave doesn't * support interrupts @@ -1568,6 +1568,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) if (internals->slaves[i].link_status_poll_enabled) internals->link_status_polling_enabled = 1; } + /* start polling if needed */ if (internals->link_status_polling_enabled) { rte_eal_alarm_set( @@ -1587,6 +1588,10 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) bond_tlb_enable(internals); return 0; + +out_err: + eth_dev->data->dev_started = 0; + return -1; } static void diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_eth_bond_version.map index 2de0a7d3..b13bb0e2 100644 --- a/drivers/net/bonding/rte_eth_bond_version.map +++ b/drivers/net/bonding/rte_eth_bond_version.map @@ -3,6 +3,7 @@ DPDK_2.0 { rte_eth_bond_8023ad_conf_get; rte_eth_bond_8023ad_setup; + rte_eth_bond_8023ad_slave_info; rte_eth_bond_active_slaves_get; rte_eth_bond_create; rte_eth_bond_link_monitoring_set; diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile index bfcc3159..03c74d24 100644 --- a/drivers/net/cxgbe/Makefile +++ b/drivers/net/cxgbe/Makefile @@ -49,7 +49,7 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd188 +CFLAGS_BASE_DRIVER = -diag-disable 188 else # # CFLAGS for gcc/clang diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile index 57a60f0f..3858148f 100644 --- a/drivers/net/e1000/Makefile +++ b/drivers/net/e1000/Makefile @@ -47,7 +47,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181 -diag-disable 188 +CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259 else # # CFLAGS for gcc/clang diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c index 1cd031ac..6e33c841 100644 --- a/drivers/net/enic/base/vnic_dev.c +++ b/drivers/net/enic/base/vnic_dev.c @@ -538,17 +538,9 @@ int vnic_dev_stats_dump(struct 
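Editor's note: the bonding start fix above replaces the early `return -1` exits with jumps to a single `out_err` label that clears `dev_started` before returning, so a failed start does not leave the port looking started. Below is a condensed sketch of that error-path shape, assuming (as the hunk implies) that the started flag was set earlier in the same callback; the helper functions are stubs, not the bonding PMD's real routines.

```c
#include <stdbool.h>

struct port_data { bool dev_started; };

/* stub steps standing in for MAC setup, slave reconfiguration, etc. */
static int apply_mac(struct port_data *d)        { (void)d; return 0; }
static int configure_slaves(struct port_data *d) { (void)d; return 0; }

static int port_start(struct port_data *d)
{
	d->dev_started = true;          /* optimistically marked as started */

	if (apply_mac(d) != 0)
		goto out_err;
	if (configure_slaves(d) != 0)
		goto out_err;

	return 0;

out_err:
	d->dev_started = false;         /* undo the state on any failure path */
	return -1;
}
```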
vnic_dev *vdev, struct vnic_stats **stats) { u64 a0, a1; int wait = 1000; - static u32 instance; - char name[NAME_MAX]; - if (!vdev->stats) { - snprintf((char *)name, sizeof(name), - "vnic_stats-%d", instance++); - vdev->stats = vdev->alloc_consistent(vdev->priv, - sizeof(struct vnic_stats), &vdev->stats_pa, (u8 *)name); - if (!vdev->stats) - return -ENOMEM; - } + if (!vdev->stats) + return -ENOMEM; *stats = vdev->stats; a0 = vdev->stats_pa; @@ -940,6 +932,18 @@ u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev) return vdev->intr_coal_timer_info.max_usec; } +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev) +{ + char name[NAME_MAX]; + static u32 instance; + + snprintf((char *)name, sizeof(name), "vnic_stats-%u", instance++); + vdev->stats = vdev->alloc_consistent(vdev->priv, + sizeof(struct vnic_stats), + &vdev->stats_pa, (u8 *)name); + return vdev->stats == NULL ? -ENOMEM : 0; +} + void vnic_dev_unregister(struct vnic_dev *vdev) { if (vdev) { diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h index 06ebd4ce..7af91cfc 100644 --- a/drivers/net/enic/base/vnic_dev.h +++ b/drivers/net/enic/base/vnic_dev.h @@ -191,6 +191,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev, void *priv, struct rte_pci_device *pdev, struct vnic_dev_bar *bar, unsigned int num_bars); struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev); +int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev); int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback); int vnic_dev_get_size(void); int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op); diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index 63d0c50d..ef5ecd44 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -1254,6 +1254,8 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu) /* free and reallocate RQs with the new MTU */ for (rq_idx = 0; rq_idx < enic->rq_count; rq_idx++) { rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)]; + if (!rq->in_use) + continue; enic_free_rq(rq); rc = enic_alloc_rq(enic, rq_idx, rq->socket_id, rq->mp, @@ -1377,6 +1379,15 @@ int enic_probe(struct enic *enic) enic_alloc_consistent, enic_free_consistent); + /* + * Allocate the consistent memory for stats upfront so both primary and + * secondary processes can dump stats. 
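Editor's note: the enic refactor above moves the one-time allocation of the stats buffer out of vnic_dev_stats_dump() and into a new vnic_dev_alloc_stats_mem() called at probe time; per the comment in the patch, this lets a secondary process dump stats from memory the primary already allocated. A rough sketch of that split follows, with hypothetical types and an ordinary pointer standing in for the DMA-coherent allocation.

```c
#include <errno.h>
#include <stddef.h>

struct stats { unsigned long rx_ok, tx_ok; };

struct vdev { struct stats *stats; };

/* probe time, primary process: allocate once */
static int dev_alloc_stats(struct vdev *d, struct stats *mem)
{
	d->stats = mem;                 /* stand-in for alloc_consistent() */
	return d->stats ? 0 : -ENOMEM;
}

/* query time, primary or secondary: never allocates */
static int dev_stats_dump(struct vdev *d, struct stats **out)
{
	if (d->stats == NULL)
		return -ENOMEM;
	*out = d->stats;
	return 0;
}
```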
+ */ + err = vnic_dev_alloc_stats_mem(enic->vdev); + if (err) { + dev_err(enic, "Failed to allocate cmd memory, aborting\n"); + goto err_out_unregister; + } /* Issue device open to get device in known state */ err = enic_dev_open(enic); if (err) { diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile index afcbd1d8..4b1c194a 100644 --- a/drivers/net/fm10k/Makefile +++ b/drivers/net/fm10k/Makefile @@ -47,7 +47,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 9c9a8671..8fb4da49 100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -49,7 +49,7 @@ LIBABIVER := 1 # to disable warnings # ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) -CFLAGS_BASE_DRIVER = -wd593 -wd188 +CFLAGS_BASE_DRIVER = -diag-disable 593 -diag-disable 188 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) CFLAGS_BASE_DRIVER += -Wno-sign-compare CFLAGS_BASE_DRIVER += -Wno-unused-value diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h index caa2e1eb..9b841eb8 100644 --- a/drivers/net/i40e/base/i40e_register.h +++ b/drivers/net/i40e/base/i40e_register.h @@ -90,7 +90,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT) #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT) #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */ #define I40E_PF_ARQT_ARQT_SHIFT 0 #define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT) @@ -113,7 +113,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT) #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT) #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */ #define I40E_PF_ATQT_ATQT_SHIFT 0 #define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT) @@ -140,7 +140,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT) #define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ARQT_MAX_INDEX 127 #define I40E_VF_ARQT_ARQT_SHIFT 0 @@ -168,7 +168,7 @@ POSSIBILITY OF SUCH DAMAGE. 
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT) #define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */ #define I40E_VF_ATQT_MAX_INDEX 127 #define I40E_VF_ATQT_ATQT_SHIFT 0 @@ -291,7 +291,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31 -#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) +#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) #define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */ #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0 #define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT) @@ -535,7 +535,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT) #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31 -#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) +#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT) #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSRWD_MAX_INDEX 3 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0 @@ -1274,14 +1274,14 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT) #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31 -#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) +#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT) #define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */ #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0 #define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT) #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16 #define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT) #define I40E_PFLAN_QALLOC_VALID_SHIFT 31 -#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT) +#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT) #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */ #define I40E_QRX_ENA_MAX_INDEX 1535 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0 @@ -1692,7 +1692,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_GLNVM_SRCTL_START_SHIFT 30 #define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT) #define I40E_GLNVM_SRCTL_DONE_SHIFT 31 -#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT) +#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT) #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */ #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0 #define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT) @@ -3059,7 +3059,7 @@ POSSIBILITY OF SUCH DAMAGE. 
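Editor's note: the many `0x1` to `0x1u` edits in i40e_register.h are about bit 31. I40E_MASK() shifts its first argument left, and left-shifting a signed `int` 1 by 31 is undefined behavior in C because the result does not fit in `int`; an unsigned literal keeps the whole expression unsigned and well defined. A small illustration follows; the macro name is a stand-in, not the real I40E_MASK().

```c
#include <stdint.h>
#include <stdio.h>

#define MAKE_MASK(val, shift) ((val) << (shift))

int main(void)
{
	/* MAKE_MASK(0x1, 31) would shift a signed int into the sign bit,
	 * which is undefined behavior; the unsigned literal is well defined. */
	uint32_t enable = MAKE_MASK(0x1u, 31);

	printf("0x%08x\n", enable);     /* prints 0x80000000 */
	return 0;
}
```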
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8 #define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT) #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31 -#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT) +#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT) #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */ #define I40E_VP_MDET_RX_MAX_INDEX 127 #define I40E_VP_MDET_RX_VALID_SHIFT 0 @@ -3196,7 +3196,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 #define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ #define I40E_VF_ARQT1_ARQT_SHIFT 0 #define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) @@ -3219,7 +3219,7 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 #define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ #define I40E_VF_ATQT1_ATQT_SHIFT 0 #define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 0b270b69..7e93c3e1 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1316,6 +1316,7 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) struct i40e_filter_control_settings settings; int ret; uint8_t aq_fail = 0; + int retries = 0; PMD_INIT_FUNC_TRACE(); @@ -1355,9 +1356,20 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev) /* disable uio intr before callback unregister */ rte_intr_disable(&(pci_dev->intr_handle)); - /* register callback func to eal lib */ - rte_intr_callback_unregister(&(pci_dev->intr_handle), - i40e_dev_interrupt_handler, (void *)dev); + /* unregister callback func to eal lib */ + do { + ret = rte_intr_callback_unregister(&(pci_dev->intr_handle), + i40e_dev_interrupt_handler, (void *)dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + return ret; + } + i40e_msec_delay(500); + } while (retries++ < 5); return 0; } @@ -1996,6 +2008,8 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_pf_disable_irq0(hw); rte_intr_disable(&(dev->pci_dev->intr_handle)); + i40e_fdir_teardown(pf); + /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); @@ -2007,7 +2021,6 @@ i40e_dev_close(struct rte_eth_dev *dev) pf->vmdq = NULL; /* release all the existing VSIs and VEBs */ - i40e_fdir_teardown(pf); i40e_vsi_release(pf->main_vsi); /* shutdown the adminq */ @@ -2117,77 +2130,139 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev) return i40e_phy_conf_link(hw, abilities, speed, false); } -int -i40e_dev_link_update(struct rte_eth_dev *dev, - int wait_to_complete) +static inline void __attribute__((always_inline)) +update_link_no_wait(struct i40e_hw *hw, struct rte_eth_link *link) +{ +/* Link status registers and values*/ +#define I40E_PRTMAC_LINKSTA 0x001E2420 +#define I40E_REG_LINK_UP 0x40000080 +#define 
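Editor's note: the i40e uninit hunk above (and the matching ixgbe hunk later in this diff) stops treating rte_intr_callback_unregister() as fire-and-forget: the call is retried a bounded number of times while it keeps returning -EAGAIN, and any other negative value is a hard error. A generic sketch of that retry shape follows; try_unregister() and delay_ms() are placeholder stubs, not the EAL calls.

```c
#include <errno.h>

#define MAX_RETRIES 5

/* stubs for illustration only */
static int try_unregister(void)      { return 0; }
static void delay_ms(unsigned int m) { (void)m; }

static int unregister_with_retry(void)
{
	int retries = 0;
	int ret;

	do {
		ret = try_unregister();
		if (ret >= 0)
			return 0;        /* done (or nothing was registered) */
		if (ret != -EAGAIN)
			return ret;      /* real failure: stop immediately */
		delay_ms(500);           /* handler still busy: wait and retry */
	} while (++retries < MAX_RETRIES);

	return -EAGAIN;                  /* still busy after bounded retries */
}
```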
I40E_PRTMAC_MACC 0x001E24E0 +#define I40E_REG_MACC_25GB 0x00020000 +#define I40E_REG_SPEED_MASK 0x38000000 +#define I40E_REG_SPEED_100MB 0x00000000 +#define I40E_REG_SPEED_1GB 0x08000000 +#define I40E_REG_SPEED_10GB 0x10000000 +#define I40E_REG_SPEED_20GB 0x20000000 +#define I40E_REG_SPEED_25_40GB 0x18000000 + uint32_t link_speed; + uint32_t reg_val; + + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_LINKSTA); + link_speed = reg_val & I40E_REG_SPEED_MASK; + reg_val &= I40E_REG_LINK_UP; + link->link_status = (reg_val == I40E_REG_LINK_UP) ? 1 : 0; + + if (unlikely(link->link_status == 0)) + return; + + /* Parse the link status */ + switch (link_speed) { + case I40E_REG_SPEED_100MB: + link->link_speed = ETH_SPEED_NUM_100M; + break; + case I40E_REG_SPEED_1GB: + link->link_speed = ETH_SPEED_NUM_1G; + break; + case I40E_REG_SPEED_10GB: + link->link_speed = ETH_SPEED_NUM_10G; + break; + case I40E_REG_SPEED_20GB: + link->link_speed = ETH_SPEED_NUM_20G; + break; + case I40E_REG_SPEED_25_40GB: + reg_val = I40E_READ_REG(hw, I40E_PRTMAC_MACC); + + if (reg_val & I40E_REG_MACC_25GB) + link->link_speed = ETH_SPEED_NUM_25G; + else + link->link_speed = ETH_SPEED_NUM_40G; + + break; + default: + PMD_DRV_LOG(ERR, "Unknown link speed info %u", link_speed); + break; + } +} + +static inline void __attribute__((always_inline)) +update_link_wait(struct i40e_hw *hw, struct rte_eth_link *link, + bool enable_lse) { -#define CHECK_INTERVAL 100 /* 100ms */ -#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ - struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 10 /* 1s (10 * 100ms) in total */ + uint32_t rep_cnt = MAX_REPEAT_TIME; struct i40e_link_status link_status; - struct rte_eth_link link, old; int status; - unsigned rep_cnt = MAX_REPEAT_TIME; - bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; - memset(&link, 0, sizeof(link)); - memset(&old, 0, sizeof(old)); memset(&link_status, 0, sizeof(link_status)); - rte_i40e_dev_atomic_read_link_status(dev, &old); do { /* Get link status information from hardware */ status = i40e_aq_get_link_info(hw, enable_lse, &link_status, NULL); - if (status != I40E_SUCCESS) { - link.link_speed = ETH_SPEED_NUM_100M; - link.link_duplex = ETH_LINK_FULL_DUPLEX; + if (unlikely(status != I40E_SUCCESS)) { + link->link_speed = ETH_SPEED_NUM_100M; + link->link_duplex = ETH_LINK_FULL_DUPLEX; PMD_DRV_LOG(ERR, "Failed to get link info"); - goto out; + return; } - link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete || link.link_status) + link->link_status = link_status.link_info & I40E_AQ_LINK_UP; + if (unlikely(link->link_status != 0)) break; rte_delay_ms(CHECK_INTERVAL); } while (--rep_cnt); - if (!link.link_status) - goto out; - - /* i40e uses full duplex only */ - link.link_duplex = ETH_LINK_FULL_DUPLEX; - /* Parse the link status */ switch (link_status.link_speed) { case I40E_LINK_SPEED_100MB: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; case I40E_LINK_SPEED_1GB: - link.link_speed = ETH_SPEED_NUM_1G; + link->link_speed = ETH_SPEED_NUM_1G; break; case I40E_LINK_SPEED_10GB: - link.link_speed = ETH_SPEED_NUM_10G; + link->link_speed = ETH_SPEED_NUM_10G; break; case I40E_LINK_SPEED_20GB: - link.link_speed = ETH_SPEED_NUM_20G; + link->link_speed = ETH_SPEED_NUM_20G; break; case I40E_LINK_SPEED_25GB: - link.link_speed = ETH_SPEED_NUM_25G; + link->link_speed = ETH_SPEED_NUM_25G; break; case I40E_LINK_SPEED_40GB: - link.link_speed = ETH_SPEED_NUM_40G; + link->link_speed = ETH_SPEED_NUM_40G; break; default: - link.link_speed = ETH_SPEED_NUM_100M; + link->link_speed = ETH_SPEED_NUM_100M; break; } +} + +int +i40e_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete) +{ + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? 
true : false; + + memset(&link, 0, sizeof(link)); + memset(&old, 0, sizeof(old)); + rte_i40e_dev_atomic_read_link_status(dev, &old); + + /* i40e uses full duplex only */ + link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_autoneg = !(dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED); -out: + if (!wait_to_complete) + update_link_no_wait(hw, &link); + else + update_link_wait(hw, &link, enable_lse); + rte_i40e_dev_atomic_write_link_status(dev, &link); if (link.link_status == old.link_status) return -1; diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index 94ddc7b8..516ff305 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -47,9 +47,10 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 -CFLAGS_ixgbe_rxtx.o += -wd3656 +CFLAGS_ixgbe_rxtx.o += -diag-disable 3656 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 2dc69ff9..182a8c66 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -1300,6 +1300,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) { struct rte_pci_device *pci_dev; struct ixgbe_hw *hw; + int retries = 0; + int ret; PMD_INIT_FUNC_TRACE(); @@ -1321,8 +1323,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev) /* disable uio intr before callback unregister */ rte_intr_disable(&(pci_dev->intr_handle)); - rte_intr_callback_unregister(&(pci_dev->intr_handle), - ixgbe_dev_interrupt_handler, (void *)eth_dev); + + do { + ret = rte_intr_callback_unregister(&(pci_dev->intr_handle), + ixgbe_dev_interrupt_handler, (void *)eth_dev); + if (ret >= 0) { + break; + } else if (ret != -EAGAIN) { + PMD_INIT_LOG(ERR, + "intr callback unregister failed: %d", + ret); + return ret; + } + rte_delay_ms(100); + } while (retries++ < (10 + IXGBE_LINK_UP_TIME)); /* uninitialize PF if max_vfs not zero */ ixgbe_pf_host_uninit(eth_dev); @@ -2083,11 +2097,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) { const struct rte_eth_dcb_rx_conf *conf; - if (nb_rx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->rx_adv_conf.dcb_rx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -2101,11 +2110,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev) if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) { const struct rte_eth_dcb_tx_conf *conf; - if (nb_tx_q != IXGBE_DCB_NB_QUEUES) { - PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.", - IXGBE_DCB_NB_QUEUES); - return -EINVAL; - } conf = &dev_conf->tx_adv_conf.dcb_tx_conf; if (!(conf->nb_tcs == ETH_4_TCS || conf->nb_tcs == ETH_8_TCS)) { @@ -5408,8 +5412,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* won't configure msix register if no mapping is done * between intr vector and event fd + * but if misx has been enabled already, need to configure + * auto clean, auto mask and throttling. 
*/ - if (!rte_intr_dp_is_en(intr_handle)) + gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); + if (!rte_intr_dp_is_en(intr_handle) && + !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT))) return; if (rte_intr_allow_others(intr_handle)) @@ -5433,27 +5441,30 @@ ixgbe_configure_msix(struct rte_eth_dev *dev) /* Populate the IVAR table and set the ITR values to the * corresponding register. */ - for (queue_id = 0; queue_id < dev->data->nb_rx_queues; - queue_id++) { - /* by default, 1:1 mapping */ - ixgbe_set_ivar_map(hw, 0, queue_id, vec); - intr_handle->intr_vec[queue_id] = vec; - if (vec < base + intr_handle->nb_efd - 1) - vec++; - } + if (rte_intr_dp_is_en(intr_handle)) { + for (queue_id = 0; queue_id < dev->data->nb_rx_queues; + queue_id++) { + /* by default, 1:1 mapping */ + ixgbe_set_ivar_map(hw, 0, queue_id, vec); + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } - switch (hw->mac.type) { - case ixgbe_mac_82598EB: - ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, - IXGBE_MISC_VEC_ID); - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); - break; - default: - break; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + ixgbe_set_ivar_map(hw, -1, + IXGBE_IVAR_OTHER_CAUSES_INDEX, + IXGBE_MISC_VEC_ID); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID); + break; + default: + break; + } } IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID), IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF); diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 86d1e44c..eb266b2c 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -422,17 +422,16 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) break; } if (attr_ctx == NULL) { - ibv_free_device_list(list); switch (err) { case 0: ERROR("cannot access device, is mlx5_ib loaded?"); - return -ENODEV; + err = ENODEV; + break; case EINVAL: ERROR("cannot use device, are drivers up to date?"); - return -EINVAL; + break; } - assert(err > 0); - return -err; + goto error; } ibv_dev = list[i]; @@ -685,6 +684,8 @@ port_error: claim_zero(ibv_dealloc_pd(pd)); if (ctx) claim_zero(ibv_close_device(ctx)); + if (eth_dev && rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_eth_dev_release_port(eth_dev); break; } diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index 1b0fa40a..4e11eadb 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -36,6 +36,12 @@ #include <assert.h> #include <stdint.h> +/* + * Not needed by this file; included to work around the lack of off_t + * definition for mlx5dv.h with unpatched rdma-core versions. + */ +#include <sys/types.h> + /* DPDK headers don't like -pedantic. 
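Editor's note: in the mlx5 error path above, rte_eth_dev_release_port() is called only for the EAL primary process; a secondary process attaches to port data it does not own, so it must not free it when its own probe fails. A condensed sketch of that guard follows, with stand-ins for the process-type query and the release call.

```c
#include <stddef.h>

enum proc_type { PROC_PRIMARY, PROC_SECONDARY };

/* stand-ins for rte_eal_process_type() and rte_eth_dev_release_port() */
static enum proc_type process_type(void) { return PROC_PRIMARY; }
static void release_port(void *port)     { (void)port; }

static void probe_error_cleanup(void *port)
{
	/* shared port data belongs to the primary process; only it may
	 * release the port on a failed probe */
	if (port != NULL && process_type() == PROC_PRIMARY)
		release_port(port);
}
```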
*/ #ifdef PEDANTIC #pragma GCC diagnostic ignored "-Wpedantic" diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index 1f42dac8..45811498 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -323,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) for (i = 0; i < txq->tx_count; i++) { if (txq->txbufs[i].mbuf) { - rte_pktmbuf_free(txq->txbufs[i].mbuf); + rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); txq->txbufs[i].mbuf = NULL; } } @@ -620,7 +620,7 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw) uint32_t tmp; tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR)); - memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr)); + memcpy(&hw->mac_addr[0], &tmp, 4); tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4)); memcpy(&hw->mac_addr[4], &tmp, 2); @@ -1731,16 +1731,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) break; } + rxds = &rxq->rxds[idx]; + if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) + break; + /* * Memory barrier to ensure that we won't do other * reads before the DD bit. */ rte_rmb(); - rxds = &rxq->rxds[idx]; - if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0) - break; - /* * We got a packet. Let's alloc a new mbuff for refilling the * free descriptor ring as soon as possible @@ -1801,6 +1801,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) mb->nb_segs = 1; mb->next = NULL; + mb->port = rxq->port_id; + /* Checking the RSS flag */ nfp_net_set_hash(rxq, rxds, mb); diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c index 9704895f..067ee293 100644 --- a/drivers/net/null/rte_eth_null.c +++ b/drivers/net/null/rte_eth_null.c @@ -93,7 +93,7 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; static uint16_t diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index 76c131b3..27590d39 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -124,7 +124,7 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG, + .link_autoneg = ETH_LINK_FIXED, }; static int diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile index 18150b58..323423ec 100644 --- a/drivers/net/qede/Makefile +++ b/drivers/net/qede/Makefile @@ -67,8 +67,8 @@ ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion endif else #ICC -CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type -CFLAGS_qede_ethdev.o += -wd279 #279: controlling expression is constant +CFLAGS_BASE_DRIVER += -diag-disable 188 #188: enumerated type mixed with another type +CFLAGS_qede_ethdev.o += -diag-disable 279 #279: controlling expression is constant endif # diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c index 3f895cd4..b9dc1d6d 100644 --- a/drivers/net/qede/base/bcm_osal.c +++ b/drivers/net/qede/base/bcm_osal.c @@ -122,7 +122,7 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = 
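Editor's note: the nfp receive fix above moves the descriptor-done test in front of the read barrier. The barrier is meant to order "read the DD bit" before "read the rest of the descriptor", and that only works if the DD load happens first. A generic sketch of that polling pattern follows; the descriptor layout, flag bit and barrier macro are illustrative, not the nfp definitions.

```c
#include <stdbool.h>
#include <stdint.h>

struct rx_desc {
	volatile uint32_t flags;   /* bit 0 (DD) written by the NIC when done */
	uint32_t len;
};

#define DESC_DONE 0x1u

/* stand-in for a read barrier such as rte_rmb() */
#define read_barrier() __atomic_thread_fence(__ATOMIC_ACQUIRE)

static bool poll_one(struct rx_desc *d, uint32_t *len_out)
{
	if (!(d->flags & DESC_DONE))
		return false;          /* nothing new yet */

	/* order the DD load before the loads of the descriptor payload */
	read_barrier();

	*len_out = d->len;
	return true;
}
```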
rte_lcore_to_socket_id(core_id); mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, RTE_CACHE_LINE_SIZE); @@ -152,7 +152,7 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align); if (!mz) { diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c index 6fb037df..207b01b9 100644 --- a/drivers/net/qede/base/ecore_int.c +++ b/drivers/net/qede/base/ecore_int.c @@ -6,6 +6,8 @@ * See LICENSE.qede_pmd for copyright and licensing details. */ +#include <rte_string_fns.h> + #include "bcm_osal.h" #include "ecore.h" #include "ecore_spq.h" @@ -938,9 +940,10 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, p_aeu->bit_name, bit); else - OSAL_STRNCPY(bit_name, - p_aeu->bit_name, - 30); + strlcpy(bit_name, + p_aeu->bit_name, + sizeof(bit_name)); + /* Handle source of the attention */ ecore_int_deassertion_aeu_bit(p_hwfn, p_aeu, diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c index 5275ef9d..ce3a0936 100644 --- a/drivers/net/qede/qede_ethdev.c +++ b/drivers/net/qede/qede_ethdev.c @@ -248,10 +248,10 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast, if ((memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) && ucast->vlan == tmp->vlan) { - DP_ERR(edev, "Unicast MAC is already added" - " with vlan = %u, vni = %u\n", - ucast->vlan, ucast->vni); - return -EEXIST; + DP_INFO(edev, "Unicast MAC is already added" + " with vlan = %u, vni = %u\n", + ucast->vlan, ucast->vni); + return 0; } } u = rte_malloc(NULL, sizeof(struct qede_ucast_entry), @@ -613,9 +613,9 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, SLIST_FOREACH(tmp, &qdev->vlan_list_head, list) { if (tmp->vid == vlan_id) { - DP_ERR(edev, "VLAN %u already configured\n", - vlan_id); - return -EEXIST; + DP_INFO(edev, "VLAN %u already configured\n", + vlan_id); + return 0; } } diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c index a6d8ef4e..60e24a78 100644 --- a/drivers/net/qede/qede_main.c +++ b/drivers/net/qede/qede_main.c @@ -9,6 +9,7 @@ #include <limits.h> #include <time.h> #include <rte_alarm.h> +#include <rte_string_fns.h> #include "qede_ethdev.h" @@ -302,9 +303,8 @@ static int qed_slowpath_start(struct ecore_dev *edev, drv_version.version = (params->drv_major << 24) | (params->drv_minor << 16) | (params->drv_rev << 8) | (params->drv_eng); - /* TBD: strlcpy() */ - strncpy((char *)drv_version.name, (const char *)params->name, - MCP_DRV_VER_STR_SIZE - 4); + strlcpy((char *)drv_version.name, (const char *)params->name, + sizeof(drv_version.name)); rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, &drv_version); if (rc) { diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c index 729d38c5..cbdf15f2 100644 --- a/drivers/net/ring/rte_eth_ring.c +++ b/drivers/net/ring/rte_eth_ring.c @@ -80,7 +80,7 @@ static struct rte_eth_link pmd_link = { .link_speed = ETH_SPEED_NUM_10G, .link_duplex = ETH_LINK_FULL_DUPLEX, .link_status = ETH_LINK_DOWN, - .link_autoneg = ETH_LINK_AUTONEG + .link_autoneg = ETH_LINK_FIXED, }; static uint16_t diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c index 9cf408ed..8b091cfe 100644 --- 
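Editor's note: the qede filter hunks above make re-adding an existing unicast MAC or VLAN a no-op. The driver now logs an informational message and returns 0 instead of failing with -EEXIST, so applications that replay their filter configuration do not see spurious errors. A tiny sketch of that idempotent-add behaviour follows; the table and its size are placeholders.

```c
#include <stdio.h>

#define MAX_VLANS 8

static unsigned int vlan_tab[MAX_VLANS];
static unsigned int vlan_cnt;

static int vlan_add(unsigned int vid)
{
	unsigned int i;

	for (i = 0; i < vlan_cnt; i++) {
		if (vlan_tab[i] == vid) {
			printf("VLAN %u already configured\n", vid);
			return 0;        /* idempotent: not an error */
		}
	}
	if (vlan_cnt == MAX_VLANS)
		return -1;               /* table full */
	vlan_tab[vlan_cnt++] = vid;
	return 0;
}
```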
a/drivers/net/szedata2/rte_eth_szedata2.c +++ b/drivers/net/szedata2/rte_eth_szedata2.c @@ -1051,22 +1051,29 @@ eth_stats_get(struct rte_eth_dev *dev, uint64_t tx_err_total = 0; uint64_t rx_total_bytes = 0; uint64_t tx_total_bytes = 0; - const struct pmd_internals *internals = dev->data->dev_private; - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_rx; i++) { - stats->q_ipackets[i] = internals->rx_queue[i].rx_pkts; - stats->q_ibytes[i] = internals->rx_queue[i].rx_bytes; - rx_total += stats->q_ipackets[i]; - rx_total_bytes += stats->q_ibytes[i]; + for (i = 0; i < nb_rx; i++) { + struct szedata2_rx_queue *rxq = dev->data->rx_queues[i]; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_ipackets[i] = rxq->rx_pkts; + stats->q_ibytes[i] = rxq->rx_bytes; + } + rx_total += rxq->rx_pkts; + rx_total_bytes += rxq->rx_bytes; } - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_tx; i++) { - stats->q_opackets[i] = internals->tx_queue[i].tx_pkts; - stats->q_obytes[i] = internals->tx_queue[i].tx_bytes; - stats->q_errors[i] = internals->tx_queue[i].err_pkts; - tx_total += stats->q_opackets[i]; - tx_total_bytes += stats->q_obytes[i]; - tx_err_total += stats->q_errors[i]; + for (i = 0; i < nb_tx; i++) { + struct szedata2_tx_queue *txq = dev->data->tx_queues[i]; + + if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { + stats->q_opackets[i] = txq->tx_pkts; + stats->q_obytes[i] = txq->tx_bytes; + stats->q_errors[i] = txq->err_pkts; + } + tx_total += txq->tx_pkts; + tx_total_bytes += txq->tx_bytes; + tx_err_total += txq->err_pkts; } stats->ipackets = rx_total; @@ -1359,9 +1366,9 @@ get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index) char pcislot_path[PATH_MAX]; struct rte_pci_addr pcislot_addr = dev->pci_dev->addr; uint32_t domain; - uint32_t bus; - uint32_t devid; - uint32_t function; + uint8_t bus; + uint8_t devid; + uint8_t function; dir = opendir("/sys/class/combo"); if (dir == NULL) @@ -1386,7 +1393,7 @@ get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index) if (fd == NULL) continue; - ret = fscanf(fd, "%4" PRIx16 ":%2" PRIx8 ":%2" PRIx8 ".%" PRIx8, + ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8, &domain, &bus, &devid, &function); fclose(fd); if (ret != 4) diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h index 00dd2feb..256e8daf 100644 --- a/drivers/net/thunderx/base/nicvf_hw_defs.h +++ b/drivers/net/thunderx/base/nicvf_hw_defs.h @@ -197,7 +197,10 @@ /* Min/Max packet size */ #define NIC_HW_MIN_FRS (64) -#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */ +/* ETH_HLEN+ETH_FCS_LEN+2*VLAN_HLEN */ +#define NIC_HW_L2_OVERHEAD (26) +#define NIC_HW_MAX_MTU (9190) +#define NIC_HW_MAX_FRS (NIC_HW_MAX_MTU + NIC_HW_L2_OVERHEAD) #define NIC_HW_MAX_SEGS (12) /* Descriptor alignments */ diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c index d229bdff..d0f0d52d 100644 --- a/drivers/net/thunderx/nicvf_ethdev.c +++ b/drivers/net/thunderx/nicvf_ethdev.c @@ -161,7 +161,7 @@ static int nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) { struct nicvf *nic = nicvf_pmd_priv(dev); - uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + uint32_t buffsz, frame_size = mtu + NIC_HW_L2_OVERHEAD; size_t i; PMD_INIT_FUNC_TRACE(); @@ -178,7 +178,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * Refuse mtu that requires the support of scattered packets * when this feature has not been enabled before. 
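Editor's note: the szedata2 stats rewrite above changes the loop bounds so that totals are accumulated over every configured queue, while only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues get per-queue entries. The old loops stopped at the smaller of the two counts, so queues beyond the per-queue limit were missing from the totals as well. A condensed sketch of the corrected accumulation follows, with a made-up counter limit.

```c
#include <stdint.h>

#define QUEUE_STAT_CNTRS 16   /* stand-in for RTE_ETHDEV_QUEUE_STAT_CNTRS */

struct queue  { uint64_t pkts, bytes; };
struct totals { uint64_t q_pkts[QUEUE_STAT_CNTRS]; uint64_t pkts, bytes; };

static void collect(struct totals *t, const struct queue *q, unsigned int nb_q)
{
	unsigned int i;

	for (i = 0; i < nb_q; i++) {
		if (i < QUEUE_STAT_CNTRS)   /* per-queue slots are limited */
			t->q_pkts[i] = q[i].pkts;
		t->pkts  += q[i].pkts;      /* totals cover every queue */
		t->bytes += q[i].bytes;
	}
}
```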
*/ - if (!dev->data->scattered_rx && + if (dev->data->dev_started && !dev->data->scattered_rx && (frame_size + 2 * VLAN_TAG_SIZE > buffsz)) return -EINVAL; @@ -192,11 +192,11 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) else dev->data->dev_conf.rxmode.jumbo_frame = 0; - if (nicvf_mbox_update_hw_max_frs(nic, frame_size)) + if (nicvf_mbox_update_hw_max_frs(nic, mtu)) return -EINVAL; - /* Update max frame size */ - dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size; + /* Update max_rx_pkt_len */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = mtu + ETHER_HDR_LEN; nic->mtu = mtu; for (i = 0; i < nic->sqs_count; i++) @@ -1338,7 +1338,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) PMD_INIT_FUNC_TRACE(); dev_info->min_rx_bufsize = ETHER_MIN_MTU; - dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; + dev_info->max_rx_pktlen = NIC_HW_MAX_MTU + ETHER_HDR_LEN; dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1); dev_info->max_tx_queues = @@ -1658,8 +1658,7 @@ nicvf_dev_start(struct rte_eth_dev *dev) /* Setup MTU based on max_rx_pkt_len or default */ mtu = dev->data->dev_conf.rxmode.jumbo_frame ? dev->data->dev_conf.rxmode.max_rx_pkt_len - - ETHER_HDR_LEN - ETHER_CRC_LEN - : ETHER_MTU; + - ETHER_HDR_LEN : ETHER_MTU; if (nicvf_dev_set_mtu(dev, mtu)) { PMD_INIT_LOG(ERR, "Failed to set default mtu size"); diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 8fde6030..12922df6 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -115,6 +115,7 @@ struct pmd_internal { char *dev_name; char *iface_name; uint16_t max_queues; + int vid; }; struct internal_list { @@ -1066,6 +1067,7 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues, data->nb_rx_queues = queues; data->nb_tx_queues = queues; internal->max_queues = queues; + internal->vid = -1; data->dev_link = pmd_link; data->mac_addrs = eth_addr; diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c index 6f4845b7..a670da4d 100644 --- a/drivers/net/virtio/virtio_user/vhost_user.c +++ b/drivers/net/virtio/virtio_user/vhost_user.c @@ -134,12 +134,13 @@ struct hugepage_file_info { static int get_hugepage_file_info(struct hugepage_file_info huges[], int max) { - int idx; + int idx, k, exist; FILE *f; char buf[BUFSIZ], *tmp, *tail; char *str_underline, *str_start; int huge_index; uint64_t v_start, v_end; + struct stat stats; f = fopen("/proc/self/maps", "r"); if (!f) { @@ -179,16 +180,39 @@ get_hugepage_file_info(struct hugepage_file_info huges[], int max) if (sscanf(str_start, "map_%d", &huge_index) != 1) continue; + /* skip duplicated file which is mapped to different regions */ + for (k = 0, exist = -1; k < idx; ++k) { + if (!strcmp(huges[k].path, tmp)) { + exist = k; + break; + } + } + if (exist >= 0) + continue; + if (idx >= max) { PMD_DRV_LOG(ERR, "Exceed maximum of %d", max); goto error; } + huges[idx].addr = v_start; - huges[idx].size = v_end - v_start; + huges[idx].size = v_end - v_start; /* To be corrected later */ snprintf(huges[idx].path, PATH_MAX, "%s", tmp); idx++; } + /* correct the size for files who have many regions */ + for (k = 0; k < idx; ++k) { + if (stat(huges[k].path, &stats) < 0) { + PMD_DRV_LOG(ERR, "Failed to stat %s, %s\n", + huges[k].path, strerror(errno)); + continue; + } + huges[k].size = stats.st_size; + PMD_DRV_LOG(INFO, "file %s, size %zx\n", + huges[k].path, huges[k].size); + } + fclose(f); return idx; diff 
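Editor's note: the thunderx hunks above replace the scattered `ETHER_HDR_LEN`/`ETHER_CRC_LEN`/VLAN arithmetic with a single NIC_HW_L2_OVERHEAD of 26 bytes and derive the 9216-byte maximum frame from a 9190-byte maximum MTU. A quick check of that accounting follows, assuming the usual 14-byte Ethernet header, 4-byte FCS and two 4-byte VLAN tags.

```c
#include <stdio.h>

#define ETH_HLEN    14
#define ETH_FCS_LEN  4
#define VLAN_HLEN    4

#define L2_OVERHEAD (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)   /* 26 bytes */
#define MAX_MTU     9190

int main(void)
{
	/* frame on the wire = MTU payload + L2 overhead: 9190 + 26 = 9216 */
	printf("overhead=%d max_frame=%d\n", L2_OVERHEAD, MAX_MTU + L2_OVERHEAD);
	return 0;
}
```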
--git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index 23ff1da2..f03e1154 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -43,7 +43,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # -CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 +CFLAGS_BASE_DRIVER = -diag-disable 174 -diag-disable 593 -diag-disable 869 +CFLAGS_BASE_DRIVER += -diag-disable 981 -diag-disable 2259 else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index 9a889c6f..4c898d8e 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -488,6 +488,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i]; vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i]; + txq->shared = &hw->tqd_start[i]; + tqd->ctrl.txNumDeferred = 0; tqd->ctrl.txThreshold = 1; tqd->conf.txRingBasePA = txq->cmd_ring.basePA; @@ -507,6 +509,8 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev) Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i]; vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i]; + rxq->shared = &hw->rqd_start[i]; + rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA; rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA; rqd->conf.compRingBasePA = rxq->comp_ring.basePA; @@ -790,7 +794,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev, link.link_status = ETH_LINK_UP; link.link_duplex = ETH_LINK_FULL_DUPLEX; link.link_speed = ETH_SPEED_NUM_10G; - link.link_autoneg = ETH_LINK_AUTONEG; + link.link_autoneg = ETH_LINK_FIXED; } vmxnet3_dev_atomic_write_link_status(dev, &link); diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 5ef7773b..0a695883 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -862,7 +862,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->queue_id = queue_idx; txq->port_id = dev->data->port_id; - txq->shared = &hw->tqd_start[queue_idx]; + txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */ txq->hw = hw; txq->qid = queue_idx; txq->stopped = TRUE; @@ -963,7 +963,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, rxq->mp = mp; rxq->queue_id = queue_idx; rxq->port_id = dev->data->port_id; - rxq->shared = &hw->rqd_start[queue_idx]; + rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */ rxq->hw = hw; rxq->qid1 = queue_idx; rxq->qid2 = queue_idx + hw->num_rx_queues; diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c index 331d2f48..e70549be 100644 --- a/examples/exception_path/main.c +++ b/examples/exception_path/main.c @@ -71,13 +71,21 @@ #include <rte_string_fns.h> #include <rte_cycles.h> +#ifndef APP_MAX_LCORE +#if (RTE_MAX_LCORE > 64) +#define APP_MAX_LCORE 64 +#else +#define APP_MAX_LCORE RTE_MAX_LCORE +#endif +#endif + /* Macros for printing using RTE_LOG */ #define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 #define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args) #define PRINT_INFO(fmt, args...) 
RTE_LOG(INFO, APP, fmt "\n", ##args) /* Max ports than can be used (each port is associated with two lcores) */ -#define MAX_PORTS (RTE_MAX_LCORE / 2) +#define MAX_PORTS (APP_MAX_LCORE / 2) /* Max size of a single packet */ #define MAX_PACKET_SZ (2048) @@ -134,7 +142,7 @@ static uint64_t input_cores_mask = 0; static uint64_t output_cores_mask = 0; /* Array storing port_id that is associated with each lcore */ -static uint8_t port_ids[RTE_MAX_LCORE]; +static uint8_t port_ids[APP_MAX_LCORE]; /* Structure type for recording lcore-specific stats */ struct stats { @@ -144,7 +152,7 @@ struct stats { } __rte_cache_aligned; /* Array of lcore-specific stats */ -static struct stats lcore_stats[RTE_MAX_LCORE]; +static struct stats lcore_stats[APP_MAX_LCORE]; /* Print out statistics on packets handled */ static void @@ -339,7 +347,9 @@ setup_port_lcore_affinities(void) uint8_t rx_port = 0; /* Setup port_ids[] array, and check masks were ok */ - RTE_LCORE_FOREACH(i) { + for (i = 0; i < APP_MAX_LCORE; i++) { + if (!rte_lcore_is_enabled(i)) + continue; if (input_cores_mask & (1ULL << i)) { /* Skip ports that are not enabled */ while ((ports_mask & (1 << rx_port)) == 0) { diff --git a/examples/performance-thread/common/lthread.c b/examples/performance-thread/common/lthread.c index 062275a4..77c870fc 100644 --- a/examples/performance-thread/common/lthread.c +++ b/examples/performance-thread/common/lthread.c @@ -327,13 +327,14 @@ struct lthread *lthread_current(void) /* * Tasklet to cancel a thread */ -static void +static void * _cancel(void *arg) { struct lthread *lt = (struct lthread *) arg; lt->state |= BIT(ST_LT_CANCELLED); lthread_detach(); + return NULL; } diff --git a/examples/performance-thread/common/lthread_api.h b/examples/performance-thread/common/lthread_api.h index ec976103..903ca7c0 100644 --- a/examples/performance-thread/common/lthread_api.h +++ b/examples/performance-thread/common/lthread_api.h @@ -139,7 +139,7 @@ struct lthread_mutex; struct lthread_condattr; struct lthread_mutexattr; -typedef void (*lthread_func_t) (void *); +typedef void *(*lthread_func_t) (void *); /* * Define the size of stack for an lthread diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c index dd403ca8..125d976c 100644 --- a/examples/performance-thread/l3fwd-thread/main.c +++ b/examples/performance-thread/l3fwd-thread/main.c @@ -1981,17 +1981,18 @@ cpu_load_collector(__rte_unused void *arg) { * * This loop is used to start empty scheduler on lcore. */ -static void +static void * lthread_null(__rte_unused void *args) { int lcore_id = rte_lcore_id(); RTE_LOG(INFO, L3FWD, "Starting scheduler on lcore %d.\n", lcore_id); lthread_exit(NULL); + return NULL; } /* main processing loop */ -static void +static void * lthread_tx_per_ring(void *dummy) { int nb_rx; @@ -2036,6 +2037,7 @@ lthread_tx_per_ring(void *dummy) lthread_cond_wait(ready, 0); } + return NULL; } /* @@ -2044,7 +2046,7 @@ lthread_tx_per_ring(void *dummy) * This lthread is used to spawn one new lthread per ring from producers. 
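Editor's note: the performance-thread changes above switch every lthread entry point from `void (*)(void *)` to the pthread-style `void *(*)(void *)`, which is why each function gains a `return NULL` and why the pthread shim can later pass the function through without a cast. A tiny illustration of the compatible signature follows; spawn() is a placeholder, not lthread_create() or pthread_create().

```c
#include <stddef.h>
#include <stdio.h>

/* pthread-compatible entry point: takes void *, returns void * */
typedef void *(*thread_func_t)(void *);

static void *worker(void *arg)
{
	printf("hello from %s\n", (const char *)arg);
	return NULL;             /* functions with nothing to return use NULL */
}

/* placeholder spawner: just invokes the function synchronously */
static int spawn(thread_func_t fn, void *arg)
{
	return fn(arg) == NULL ? 0 : -1;
}

int main(void)
{
	return spawn(worker, (void *)"thread 0");
}
```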
* */ -static void +static void * lthread_tx(void *args) { struct lthread *lt; @@ -2089,9 +2091,10 @@ lthread_tx(void *args) } } + return NULL; } -static void +static void * lthread_rx(void *dummy) { int ret; @@ -2114,7 +2117,7 @@ lthread_rx(void *dummy) if (rx_conf->n_rx_queue == 0) { RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", rte_lcore_id()); - return; + return NULL; } RTE_LOG(INFO, L3FWD, "Entering main Rx loop on lcore %u\n", rte_lcore_id()); @@ -2185,6 +2188,7 @@ lthread_rx(void *dummy) lthread_yield(); } } + return NULL; } /* @@ -2193,8 +2197,9 @@ lthread_rx(void *dummy) * This lthread loop spawns all rx and tx lthreads on master lcore */ -static void -lthread_spawner(__rte_unused void *arg) { +static void * +lthread_spawner(__rte_unused void *arg) +{ struct lthread *lt[MAX_THREAD]; int i; int n_thread = 0; @@ -2235,6 +2240,7 @@ lthread_spawner(__rte_unused void *arg) { for (i = 0; i < n_thread; i++) lthread_join(lt[i], NULL); + return NULL; } /* diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c index febae39b..5811cff8 100644 --- a/examples/performance-thread/pthread_shim/main.c +++ b/examples/performance-thread/pthread_shim/main.c @@ -149,8 +149,7 @@ void *helloworld_pthread(void *arg) */ __thread pthread_t tid[HELLOW_WORLD_MAX_LTHREADS]; -static void initial_lthread(void *args); -static void initial_lthread(void *args __attribute__((unused))) +static void *initial_lthread(void *args __attribute__((unused))) { int lcore = (int) rte_lcore_id(); /* @@ -225,6 +224,7 @@ static void initial_lthread(void *args __attribute__((unused))) /* shutdown the lthread scheduler */ lthread_scheduler_shutdown(rte_lcore_id()); lthread_detach(); + return NULL; } @@ -235,8 +235,6 @@ static void initial_lthread(void *args __attribute__((unused))) * in the core mask */ static int -lthread_scheduler(void *args); -static int lthread_scheduler(void *args __attribute__((unused))) { /* create initial thread */ diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c index 113bafa0..7e6d7230 100644 --- a/examples/performance-thread/pthread_shim/pthread_shim.c +++ b/examples/performance-thread/pthread_shim/pthread_shim.c @@ -397,7 +397,7 @@ int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex) int pthread_create(pthread_t *__restrict tid, const pthread_attr_t *__restrict attr, - void *(func) (void *), + lthread_func_t func, void *__restrict arg) { if (override) { @@ -422,7 +422,7 @@ pthread_create(pthread_t *__restrict tid, } } return lthread_create((struct lthread **)tid, lcore, - (void (*)(void *))func, arg); + func, arg); } return _sys_pthread_funcs.f_pthread_create(tid, attr, func, arg); } diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c index b58d85b7..c5138690 100644 --- a/lib/librte_eal/common/eal_common_memzone.c +++ b/lib/librte_eal/common/eal_common_memzone.c @@ -253,7 +253,8 @@ memzone_reserve_aligned_thread_unsafe(const char *name, size_t len, snprintf(mz->name, sizeof(mz->name), "%s", name); mz->phys_addr = rte_malloc_virt2phy(mz_addr); mz->addr = mz_addr; - mz->len = (requested_len == 0 ? elem->size : requested_len); + mz->len = (requested_len == 0 ? 
+ (elem->size - MALLOC_ELEM_OVERHEAD) : requested_len); mz->hugepage_sz = elem->ms->hugepage_sz; mz->socket_id = elem->ms->socket_id; mz->flags = 0; diff --git a/lib/librte_eal/common/eal_filesystem.h b/lib/librte_eal/common/eal_filesystem.h index 8acbd996..8f4f0200 100644 --- a/lib/librte_eal/common/eal_filesystem.h +++ b/lib/librte_eal/common/eal_filesystem.h @@ -86,8 +86,6 @@ eal_hugepage_info_path(void) /** String format for hugepage map files. */ #define HUGEFILE_FMT "%s/%smap_%d" -#define TEMP_HUGEFILE_FMT "%s/%smap_temp_%d" - static inline const char * eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id) { diff --git a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h index 37f5eff2..790e6fd7 100644 --- a/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h +++ b/lib/librte_eal/common/include/arch/ppc_64/rte_atomic.h @@ -55,7 +55,7 @@ extern "C" { * Guarantees that the LOAD and STORE operations generated before the * barrier occur before the LOAD and STORE operations generated after. */ -#define rte_mb() {asm volatile("sync" : : : "memory"); } +#define rte_mb() asm volatile("sync" : : : "memory") /** * Write memory barrier. diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h index 8e630c21..577236a3 100644 --- a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h +++ b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h @@ -104,10 +104,12 @@ static inline int rte_tm_supported(void) static inline int rte_try_tm(volatile int *lock) { + int retries; + if (!rte_rtm_supported) return 0; - int retries = RTE_RTM_MAX_RETRIES; + retries = RTE_RTM_MAX_RETRIES; while (likely(retries--)) { diff --git a/lib/librte_eal/common/include/generic/rte_rwlock.h b/lib/librte_eal/common/include/generic/rte_rwlock.h index 7a0fdc55..e57ce210 100644 --- a/lib/librte_eal/common/include/generic/rte_rwlock.h +++ b/lib/librte_eal/common/include/generic/rte_rwlock.h @@ -99,7 +99,7 @@ rte_rwlock_read_lock(rte_rwlock_t *rwl) continue; } success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt, - x, x + 1); + (uint32_t)x, (uint32_t)(x + 1)); } } @@ -135,7 +135,7 @@ rte_rwlock_write_lock(rte_rwlock_t *rwl) continue; } success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt, - 0, -1); + 0, (uint32_t)-1); } } diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h index db5ac91c..f5e2f886 100644 --- a/lib/librte_eal/common/include/rte_common.h +++ b/lib/librte_eal/common/include/rte_common.h @@ -105,7 +105,7 @@ typedef uint16_t unaligned_uint16_t; /*********** Macros for pointer arithmetic ********/ /** - * add a byte-value offset from a pointer + * add a byte-value offset to a pointer */ #define RTE_PTR_ADD(ptr, x) ((void*)((uintptr_t)(ptr) + (x))) @@ -323,7 +323,7 @@ rte_pause(void) {} static inline uint32_t rte_bsf32(uint32_t v) { - return __builtin_ctz(v); + return (uint32_t)__builtin_ctz(v); } #ifndef offsetof diff --git a/lib/librte_eal/common/include/rte_dev.h b/lib/librte_eal/common/include/rte_dev.h index dcf33d0e..2ad6da86 100644 --- a/lib/librte_eal/common/include/rte_dev.h +++ b/lib/librte_eal/common/include/rte_dev.h @@ -60,15 +60,18 @@ rte_pmd_debug_trace(const char *func_name, const char *fmt, ...) 
va_start(ap, fmt); - char buffer[vsnprintf(NULL, 0, fmt, ap) + 1]; + { + char buffer[vsnprintf(NULL, 0, fmt, ap) + 1]; - va_end(ap); + va_end(ap); - va_start(ap, fmt); - vsnprintf(buffer, sizeof(buffer), fmt, ap); - va_end(ap); + va_start(ap, fmt); + vsnprintf(buffer, sizeof(buffer), fmt, ap); + va_end(ap); - rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s", func_name, buffer); + rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, "%s: %s", + func_name, buffer); + } } /* diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h index fe7b5865..da48a94a 100644 --- a/lib/librte_eal/common/include/rte_lcore.h +++ b/lib/librte_eal/common/include/rte_lcore.h @@ -134,7 +134,7 @@ rte_lcore_index(int lcore_id) if (lcore_id >= RTE_MAX_LCORE) return -1; if (lcore_id < 0) - lcore_id = rte_lcore_id(); + lcore_id = (int)rte_lcore_id(); return lcore_config[lcore_id].core_index; } diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h index 24ae8363..9f0e5e16 100644 --- a/lib/librte_eal/common/include/rte_random.h +++ b/lib/librte_eal/common/include/rte_random.h @@ -60,7 +60,7 @@ extern "C" { static inline void rte_srand(uint64_t seedval) { - srand48((long unsigned int)seedval); + srand48((long)seedval); } /** @@ -77,9 +77,9 @@ static inline uint64_t rte_rand(void) { uint64_t val; - val = lrand48(); + val = (uint64_t)lrand48(); val <<= 32; - val += lrand48(); + val += (uint64_t)lrand48(); return val; } diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h index cfca2f8d..7c0ab15a 100644 --- a/lib/librte_eal/common/include/rte_string_fns.h +++ b/lib/librte_eal/common/include/rte_string_fns.h @@ -44,6 +44,8 @@ extern "C" { #endif +#include <stdio.h> + /** * Takes string "string" parameter and splits it at character "delim" * up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like @@ -74,6 +76,35 @@ int rte_strsplit(char *string, int stringlen, char **tokens, int maxtokens, char delim); +/** + * @internal + * DPDK-specific version of strlcpy for systems without + * libc or libbsd copies of the function + */ +static inline size_t +rte_strlcpy(char *dst, const char *src, size_t size) +{ + return snprintf(dst, size, "%s", src); +} + +/* pull in a strlcpy function */ +#ifdef RTE_EXEC_ENV_BSDAPP +#include <string.h> +#ifndef __BSD_VISIBLE /* non-standard functions are hidden */ +#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size) +#endif + + +#else /* non-BSD platforms */ +#ifdef RTE_USE_LIBBSD +#include <bsd/string.h> + +#else /* no BSD header files, create own */ +#define strlcpy(dst, src, size) rte_strlcpy(dst, src, size) + +#endif /* RTE_USE_LIBBSD */ +#endif /* BSDAPP */ + #ifdef __cplusplus } #endif diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h index 17a00538..ea8ba3be 100644 --- a/lib/librte_eal/common/include/rte_version.h +++ b/lib/librte_eal/common/include/rte_version.h @@ -66,7 +66,7 @@ extern "C" { /** * Patch level number i.e. 
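Editor's note: the new rte_strlcpy() above gets BSD strlcpy() semantics out of snprintf(): the destination is always NUL-terminated for a non-zero size, and the return value is the length of the source string, so a caller can detect truncation by comparing it with the buffer size. A brief usage sketch follows, using a local copy of the same one-liner.

```c
#include <stdio.h>
#include <string.h>

/* same shape as the fallback added above */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	return (size_t)snprintf(dst, size, "%s", src);
}

int main(void)
{
	char buf[8];
	size_t n = my_strlcpy(buf, "a-longer-driver-name", sizeof(buf));

	/* buf now holds "a-longe" plus the terminating NUL */
	if (n >= sizeof(buf))
		printf("truncated: needed %zu bytes, kept %zu characters\n",
		       n + 1, strlen(buf));
	return 0;
}
```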
the z in yy.mm.z */ -#define RTE_VER_MINOR 6 +#define RTE_VER_MINOR 7 /** * Extra string to be appended to version number diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c index b0d0c3c6..02ec2415 100644 --- a/lib/librte_eal/linuxapp/eal/eal_pci.c +++ b/lib/librte_eal/linuxapp/eal/eal_pci.c @@ -94,7 +94,8 @@ error: } static int -pci_get_kernel_driver_by_path(const char *filename, char *dri_name) +pci_get_kernel_driver_by_path(const char *filename, char *dri_name, + size_t len) { int count; char path[PATH_MAX]; @@ -115,7 +116,7 @@ pci_get_kernel_driver_by_path(const char *filename, char *dri_name) name = strrchr(path, '/'); if (name) { - strncpy(dri_name, name + 1, strlen(name + 1) + 1); + strlcpy(dri_name, name + 1, len); return 0; } @@ -369,7 +370,7 @@ pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus, /* parse driver */ snprintf(filename, sizeof(filename), "%s/driver", dirname); - ret = pci_get_kernel_driver_by_path(filename, driver); + ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver)); if (ret < 0) { RTE_LOG(ERR, EAL, "Fail to get kernel driver\n"); free(dev); diff --git a/lib/librte_eal/linuxapp/kni/compat.h b/lib/librte_eal/linuxapp/kni/compat.h index 3f8c0bc8..6a6968d9 100644 --- a/lib/librte_eal/linuxapp/kni/compat.h +++ b/lib/librte_eal/linuxapp/kni/compat.h @@ -101,6 +101,11 @@ #undef NET_NAME_UNKNOWN #endif +#if (defined(RHEL_RELEASE_CODE) && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 5))) +#define ndo_change_mtu ndo_change_mtu_rh74 +#endif + #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) #define HAVE_SIGNAL_FUNCTIONS_OWN_HEADER #endif diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index 4679dc69..a698fc00 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -438,8 +438,7 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id) for (i = 0; i < RTE_MAX_ETHPORTS; i++) { - if (!strncmp(name, - rte_eth_dev_data[i].name, strlen(name))) { + if (!strcmp(name, rte_eth_dev_data[i].name)) { *port_id = i; @@ -628,6 +627,12 @@ rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; + if (!dev->data->dev_started) { + RTE_PMD_DEBUG_TRACE( + "port %d must be started before start any queue\n", port_id); + return -EINVAL; + } + if (rx_queue_id >= dev->data->nb_rx_queues) { RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id); return -EINVAL; @@ -680,6 +685,12 @@ rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; + if (!dev->data->dev_started) { + RTE_PMD_DEBUG_TRACE( + "port %d must be started before start any queue\n", port_id); + return -EINVAL; + } + if (tx_queue_id >= dev->data->nb_tx_queues) { RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id); return -EINVAL; diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h index dc6d0cc9..da33b9d5 100644 --- a/lib/librte_ether/rte_ethdev.h +++ b/lib/librte_ether/rte_ethdev.h @@ -2646,6 +2646,7 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, struct rte_mbuf **rx_pkts, const uint16_t nb_pkts) { struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + uint16_t nb_rx; #ifdef RTE_LIBRTE_ETHDEV_DEBUG RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); @@ -2656,13 +2657,14 @@ rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, return 0; } #endif - int16_t nb_rx = 
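Editor's note: the rte_eth_dev_get_port_by_name() fix above swaps a length-bounded strncmp() for a full strcmp(). Bounding the comparison by strlen(name) only checks that the stored name starts with the requested one, so a lookup could match a longer, different port name. A minimal demonstration of the difference follows.

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *wanted = "net_ring0";
	const char *stored = "net_ring01";   /* a different port */

	/* prefix compare: wrongly reports a match */
	printf("strncmp: %s\n",
	       strncmp(wanted, stored, strlen(wanted)) == 0 ? "match" : "no match");

	/* exact compare: correctly rejects it */
	printf("strcmp:  %s\n",
	       strcmp(wanted, stored) == 0 ? "match" : "no match");
	return 0;
}
```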
(*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], + nb_rx = (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], rx_pkts, nb_pkts); #ifdef RTE_ETHDEV_RXTX_CALLBACKS - struct rte_eth_rxtx_callback *cb = dev->post_rx_burst_cbs[queue_id]; + if (unlikely(dev->post_rx_burst_cbs[queue_id] != NULL)) { + struct rte_eth_rxtx_callback *cb = + dev->post_rx_burst_cbs[queue_id]; - if (unlikely(cb != NULL)) { do { nb_rx = cb->fn.rx(port_id, queue_id, rx_pkts, nb_rx, nb_pkts, cb->param); @@ -2692,7 +2694,7 @@ rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id) struct rte_eth_dev *dev = &rte_eth_devices[port_id]; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP); - return (*dev->dev_ops->rx_queue_count)(dev, queue_id); + return (int)(*dev->dev_ops->rx_queue_count)(dev, queue_id); } /** @@ -2886,8 +2888,9 @@ rte_eth_tx_buffer_flush(uint8_t port_id, uint16_t queue_id, /* All packets sent, or to be dealt with by callback below */ if (unlikely(sent != to_send)) - buffer->error_callback(&buffer->pkts[sent], to_send - sent, - buffer->error_userdata); + buffer->error_callback(&buffer->pkts[sent], + (uint16_t)(to_send - sent), + buffer->error_userdata); return sent; } @@ -4345,9 +4348,9 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id); * Get the device name from port id * * @param port_id -* pointer to port identifier of the device +* Port identifier of the device. * @param name -* pci address or name of the device +* Buffer of size RTE_ETH_NAME_MAX_LEN to store the name. * @return * - (0) if successful. * - (-EINVAL) on failure. diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h index 622c1800..f29de81a 100644 --- a/lib/librte_hash/rte_hash.h +++ b/lib/librte_hash/rte_hash.h @@ -357,7 +357,7 @@ rte_hash_lookup(const struct rte_hash *h, const void *key); * @param key * Key to find. * @param sig - * Hash value to remove from the hash table. + * Precomputed hash value for 'key'. * @return * - -EINVAL if the parameters are invalid. * - -ENOENT if the key is not found. 
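The rte_hash.h documentation fix above clarifies that the 'sig' argument of rte_hash_del_key_with_hash() is a precomputed hash of the key, not a value stored in the table. As a minimal sketch of that usage (assuming a table 'h' created elsewhere with rte_hash_create(); lookup_then_delete is an illustrative name, not a DPDK API), the signature can be computed once with rte_hash_hash() and reused for both the lookup and the delete:

    #include <rte_hash.h>

    static int32_t
    lookup_then_delete(const struct rte_hash *h, const void *key)
    {
            hash_sig_t sig = rte_hash_hash(h, key); /* hash the key once */
            int32_t pos;

            pos = rte_hash_lookup_with_hash(h, key, sig);
            if (pos < 0)
                    return pos; /* -ENOENT if absent, -EINVAL on bad args */

            /* the same precomputed value is valid for the delete */
            return rte_hash_del_key_with_hash(h, key, sig);
    }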
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h index 63e74aa4..2aec6c5f 100644 --- a/lib/librte_hash/rte_hash_crc.h +++ b/lib/librte_hash/rte_hash_crc.h @@ -366,14 +366,13 @@ crc32c_1word(uint32_t data, uint32_t init_val) static inline uint32_t crc32c_2words(uint64_t data, uint32_t init_val) { + uint32_t crc, term1, term2; union { uint64_t u64; uint32_t u32[2]; } d; d.u64 = data; - uint32_t crc, term1, term2; - crc = init_val; crc ^= d.u32[0]; @@ -427,9 +426,9 @@ crc32c_sse42_u64_mimic(uint64_t data, uint64_t init_val) } d; d.u64 = data; - init_val = crc32c_sse42_u32(d.u32[0], init_val); - init_val = crc32c_sse42_u32(d.u32[1], init_val); - return init_val; + init_val = crc32c_sse42_u32(d.u32[0], (uint32_t)init_val); + init_val = crc32c_sse42_u32(d.u32[1], (uint32_t)init_val); + return (uint32_t)init_val; } #endif @@ -441,7 +440,7 @@ crc32c_sse42_u64(uint64_t data, uint64_t init_val) "crc32q %[data], %[init_val];" : [init_val] "+r" (init_val) : [data] "rm" (data)); - return init_val; + return (uint32_t)init_val; } #endif diff --git a/lib/librte_ip_frag/ip_frag_internal.c b/lib/librte_ip_frag/ip_frag_internal.c index b679ff43..2cee6f8a 100644 --- a/lib/librte_ip_frag/ip_frag_internal.c +++ b/lib/librte_ip_frag/ip_frag_internal.c @@ -183,7 +183,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr, fp->frags[IP_LAST_FRAG_IDX].len); else IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n" - "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, " + "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, " "total_size: %u, frag_size: %u, last_idx: %u\n" "first fragment: ofs: %u, len: %u\n" "last fragment: ofs: %u, len: %u\n\n", @@ -241,7 +241,7 @@ ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr, fp->frags[IP_LAST_FRAG_IDX].len); else IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n" - "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, " + "ipv6_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, " "total_size: %u, frag_size: %u, last_idx: %u\n" "first fragment: ofs: %u, len: %u\n" "last fragment: ofs: %u, len: %u\n\n", @@ -362,7 +362,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl, if (p1->key.key_len == IPV4_KEYLEN) IP_FRAG_LOG(DEBUG, "%s:%d:\n" "tbl: %p, max_entries: %u, use_entries: %u\n" - "ipv6_frag_pkt line0: %p, index: %u from %u\n" + "ipv4_frag_pkt line0: %p, index: %u from %u\n" "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n", __func__, __LINE__, tbl, tbl->max_entries, tbl->use_entries, @@ -388,7 +388,7 @@ ip_frag_lookup(struct rte_ip_frag_tbl *tbl, if (p2->key.key_len == IPV4_KEYLEN) IP_FRAG_LOG(DEBUG, "%s:%d:\n" "tbl: %p, max_entries: %u, use_entries: %u\n" - "ipv6_frag_pkt line1: %p, index: %u from %u\n" + "ipv4_frag_pkt line1: %p, index: %u from %u\n" "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n", __func__, __LINE__, tbl, tbl->max_entries, tbl->use_entries, diff --git a/lib/librte_ip_frag/rte_ipv4_reassembly.c b/lib/librte_ip_frag/rte_ipv4_reassembly.c index e084ca59..847ea0d6 100644 --- a/lib/librte_ip_frag/rte_ipv4_reassembly.c +++ b/lib/librte_ip_frag/rte_ipv4_reassembly.c @@ -88,7 +88,9 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp) /* chain with the first fragment. */ rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len)); rte_pktmbuf_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m); + fp->frags[curr_idx].mb = NULL; m = fp->frags[IP_FIRST_FRAG_IDX].mb; + fp->frags[IP_FIRST_FRAG_IDX].mb = NULL; /* update mbuf fields for reassembled packet. 
*/ m->ol_flags |= PKT_TX_IP_CKSUM; diff --git a/lib/librte_ip_frag/rte_ipv6_reassembly.c b/lib/librte_ip_frag/rte_ipv6_reassembly.c index 21a5ef5d..d9b5d690 100644 --- a/lib/librte_ip_frag/rte_ipv6_reassembly.c +++ b/lib/librte_ip_frag/rte_ipv6_reassembly.c @@ -111,7 +111,9 @@ ipv6_frag_reassemble(struct ip_frag_pkt *fp) /* chain with the first fragment. */ rte_pktmbuf_adj(m, (uint16_t)(m->l2_len + m->l3_len)); rte_pktmbuf_chain(fp->frags[IP_FIRST_FRAG_IDX].mb, m); + fp->frags[curr_idx].mb = NULL; m = fp->frags[IP_FIRST_FRAG_IDX].mb; + fp->frags[IP_FIRST_FRAG_IDX].mb = NULL; /* update mbuf fields for reassembled packet. */ m->ol_flags |= PKT_TX_IP_CKSUM; diff --git a/lib/librte_kvargs/rte_kvargs.c b/lib/librte_kvargs/rte_kvargs.c index 8d56abd4..5504c6df 100644 --- a/lib/librte_kvargs/rte_kvargs.c +++ b/lib/librte_kvargs/rte_kvargs.c @@ -41,7 +41,7 @@ /* * Receive a string with a list of arguments following the pattern - * key=value;key=value;... and insert them into the list. + * key=value,key=value,... and insert them into the list. * strtok() is used so the params string will be copied to be modified. */ static int @@ -182,7 +182,7 @@ rte_kvargs_free(struct rte_kvargs *kvlist) } /* - * Parse the arguments "key=value;key=value;..." string and return + * Parse the arguments "key=value,key=value,..." string and return * an allocated structure that contains a key/value list. Also * check if only valid keys were used. */ diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h index bc015d03..5b754645 100644 --- a/lib/librte_mbuf/rte_mbuf.h +++ b/lib/librte_mbuf/rte_mbuf.h @@ -204,12 +204,8 @@ extern "C" { * - set the PKT_TX_TCP_SEG flag in mbuf->ol_flags (this flag implies * PKT_TX_TCP_CKSUM) * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6 - * - if it's IPv4, set the PKT_TX_IP_CKSUM flag and write the IP checksum - * to 0 in the packet + * - if it's IPv4, set the PKT_TX_IP_CKSUM flag * - fill the mbuf offload information: l2_len, l3_len, l4_len, tso_segsz - * - calculate the pseudo header checksum without taking ip_len in account, - * and set it in the TCP header. Refer to rte_ipv4_phdr_cksum() and - * rte_ipv6_phdr_cksum() that can be used as helpers. */ #define PKT_TX_TCP_SEG (1ULL << 50) @@ -222,9 +218,6 @@ extern "C" { * - fill l2_len and l3_len in mbuf * - set the flags PKT_TX_TCP_CKSUM, PKT_TX_SCTP_CKSUM or PKT_TX_UDP_CKSUM * - set the flag PKT_TX_IPV4 or PKT_TX_IPV6 - * - calculate the pseudo header checksum and set it in the L4 header (only - * for TCP or UDP). See rte_ipv4_phdr_cksum() and rte_ipv6_phdr_cksum(). - * For SCTP, set the crc field to 0. */ #define PKT_TX_L4_NO_CKSUM (0ULL << 52) /**< Disable L4 cksum of TX pkt. */ #define PKT_TX_TCP_CKSUM (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */ @@ -236,7 +229,6 @@ extern "C" { * Offload the IP checksum in the hardware. The flag PKT_TX_IPV4 should * also be set by the application, although a PMD will only check * PKT_TX_IP_CKSUM. - * - set the IP checksum field in the packet to 0 * - fill the mbuf offload information: l2_len, l3_len */ #define PKT_TX_IP_CKSUM (1ULL << 54) @@ -261,10 +253,8 @@ extern "C" { /** * Offload the IP checksum of an external header in the hardware. The - * flag PKT_TX_OUTER_IPV4 should also be set by the application, alto ugh - * a PMD will only check PKT_TX_IP_CKSUM. The IP checksum field in the - * packet must be set to 0. 
- * - set the outer IP checksum field in the packet to 0 + * flag PKT_TX_OUTER_IPV4 should also be set by the application, although + * a PMD will only check PKT_TX_OUTER_IP_CKSUM. * - fill the mbuf offload information: outer_l2_len, outer_l3_len */ #define PKT_TX_OUTER_IP_CKSUM (1ULL << 58) @@ -655,7 +645,7 @@ rte_mbuf_refcnt_read(const struct rte_mbuf *m) static inline void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value) { - rte_atomic16_set(&m->refcnt_atomic, new_value); + rte_atomic16_set(&m->refcnt_atomic, (int16_t)new_value); } /** @@ -678,8 +668,9 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) * reference counter can occur. */ if (likely(rte_mbuf_refcnt_read(m) == 1)) { - rte_mbuf_refcnt_set(m, 1 + value); - return 1 + value; + ++value; + rte_mbuf_refcnt_set(m, (uint16_t)value); + return (uint16_t)value; } return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value)); @@ -998,7 +989,8 @@ rte_pktmbuf_priv_size(struct rte_mempool *mp) */ static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m) { - m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len); + m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM, + (uint16_t)m->buf_len); } /** @@ -1176,10 +1168,11 @@ static inline void rte_pktmbuf_detach(struct rte_mbuf *m) { struct rte_mbuf *md = rte_mbuf_from_indirect(m); struct rte_mempool *mp = m->pool; - uint32_t mbuf_size, buf_len, priv_size; + uint32_t mbuf_size, buf_len; + uint16_t priv_size; priv_size = rte_pktmbuf_priv_size(mp); - mbuf_size = sizeof(struct rte_mbuf) + priv_size; + mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size); buf_len = rte_pktmbuf_data_room_size(mp); m->priv_size = priv_size; @@ -1463,7 +1456,10 @@ static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m, if (unlikely(len > rte_pktmbuf_headroom(m))) return NULL; - m->data_off -= len; + /* NB: elaborating the subtraction like this instead of using + * -= allows us to ensure the result type is uint16_t + * avoiding compiler warnings on gcc 8.1 at least */ + m->data_off = (uint16_t)(m->data_off - len); m->data_len = (uint16_t)(m->data_len + len); m->pkt_len = (m->pkt_len + len); @@ -1523,8 +1519,11 @@ static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len) if (unlikely(len > m->data_len)) return NULL; + /* NB: elaborating the addition like this instead of using + * += allows us to ensure the result type is uint16_t + * avoiding compiler warnings on gcc 8.1 at least */ m->data_len = (uint16_t)(m->data_len - len); - m->data_off += len; + m->data_off = (uint16_t)(m->data_off + len); m->pkt_len = (m->pkt_len - len); return (char *)m->buf_addr + m->data_off; } @@ -1636,7 +1635,10 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail cur_tail = rte_pktmbuf_lastseg(head); cur_tail->next = tail; - /* accumulate number of segments and total length. */ + /* accumulate number of segments and total length. 
+ * NB: elaborating the addition like this instead of using + * -= allows us to ensure the result type is uint16_t + * avoiding compiler warnings on gcc 8.1 at least */ head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs); head->pkt_len += tail->pkt_len; diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c index aa513b97..3cb9e42c 100644 --- a/lib/librte_mempool/rte_mempool.c +++ b/lib/librte_mempool/rte_mempool.c @@ -396,12 +396,18 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr, } /* not enough room to store one object */ - if (i == 0) - return -EINVAL; + if (i == 0) { + ret = -EINVAL; + goto fail; + } STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next); mp->nb_mem_chunks++; return i; + +fail: + rte_free(memhdr); + return ret; } /* Add objects in the pool, using a table of physical pages. Return the @@ -456,9 +462,6 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr, size_t off, phys_len; int ret, cnt = 0; - /* mempool must not be populated */ - if (mp->nb_mem_chunks != 0) - return -EEXIST; /* address and len must be page-aligned */ if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr) return -EINVAL; @@ -622,7 +625,7 @@ rte_mempool_populate_anon(struct rte_mempool *mp) char *addr; /* mempool is already populated, error */ - if (!STAILQ_EMPTY(&mp->mem_list)) { + if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) { rte_errno = EINVAL; return 0; } diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h index 5edf66c3..796fd56d 100644 --- a/lib/librte_net/rte_ether.h +++ b/lib/librte_net/rte_ether.h @@ -239,7 +239,7 @@ static inline void eth_random_addr(uint8_t *addr) uint8_t *p = (uint8_t *)&rand; rte_memcpy(addr, p, ETHER_ADDR_LEN); - addr[0] &= ~ETHER_GROUP_ADDR; /* clear multicast bit */ + addr[0] &= (uint8_t)~ETHER_GROUP_ADDR; /* clear multicast bit */ addr[0] |= ETHER_LOCAL_ADMIN_ADDR; /* set local assignment bit */ } @@ -352,11 +352,12 @@ static inline int rte_vlan_strip(struct rte_mbuf *m) { struct ether_hdr *eh = rte_pktmbuf_mtod(m, struct ether_hdr *); + struct vlan_hdr *vh; if (eh->ether_type != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) return -1; - struct vlan_hdr *vh = (struct vlan_hdr *)(eh + 1); + vh = (struct vlan_hdr *)(eh + 1); m->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; m->vlan_tci = rte_be_to_cpu_16(vh->vlan_tci); diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h index 4491b86e..19d6c95c 100644 --- a/lib/librte_net/rte_ip.h +++ b/lib/librte_net/rte_ip.h @@ -284,7 +284,7 @@ rte_raw_cksum_mbuf(const struct rte_mbuf *m, uint32_t off, uint32_t len, for (;;) { tmp = __rte_raw_cksum(buf, seglen, 0); if (done & 1) - tmp = rte_bswap16(tmp); + tmp = rte_bswap16((uint16_t)tmp); sum += tmp; done += seglen; if (done == len) @@ -315,7 +315,7 @@ rte_ipv4_cksum(const struct ipv4_hdr *ipv4_hdr) { uint16_t cksum; cksum = rte_raw_cksum(ipv4_hdr, sizeof(struct ipv4_hdr)); - return (cksum == 0xffff) ? cksum : ~cksum; + return (cksum == 0xffff) ? 
cksum : (uint16_t)~cksum; } /** @@ -380,8 +380,8 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr) uint32_t cksum; uint32_t l4_len; - l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - - sizeof(struct ipv4_hdr); + l4_len = (uint32_t)(rte_be_to_cpu_16(ipv4_hdr->total_length) - + sizeof(struct ipv4_hdr)); cksum = rte_raw_cksum(l4_hdr, l4_len); cksum += rte_ipv4_phdr_cksum(ipv4_hdr, 0); @@ -391,7 +391,7 @@ rte_ipv4_udptcp_cksum(const struct ipv4_hdr *ipv4_hdr, const void *l4_hdr) if (cksum == 0) cksum = 0xffff; - return cksum; + return (uint16_t)cksum; } /** @@ -431,7 +431,7 @@ rte_ipv6_phdr_cksum(const struct ipv6_hdr *ipv6_hdr, uint64_t ol_flags) uint32_t proto; /* L4 protocol - top 3 bytes must be zero */ } psd_hdr; - psd_hdr.proto = (ipv6_hdr->proto << 24); + psd_hdr.proto = (uint32_t)(ipv6_hdr->proto << 24); if (ol_flags & PKT_TX_TCP_SEG) { psd_hdr.len = 0; } else { @@ -474,7 +474,7 @@ rte_ipv6_udptcp_cksum(const struct ipv6_hdr *ipv6_hdr, const void *l4_hdr) if (cksum == 0) cksum = 0xffff; - return cksum; + return (uint16_t)cksum; } #ifdef __cplusplus diff --git a/lib/librte_vhost/fd_man.c b/lib/librte_vhost/fd_man.c index 8a075da2..b76b8bce 100644 --- a/lib/librte_vhost/fd_man.c +++ b/lib/librte_vhost/fd_man.c @@ -192,6 +192,38 @@ fdset_del(struct fdset *pfdset, int fd) return dat; } +/** + * Unregister the fd from the fdset. + * + * If parameters are invalid, return directly -2. + * And check whether fd is busy, if yes, return -1. + * Otherwise, try to delete the fd from fdset and + * return true. + */ +int +fdset_try_del(struct fdset *pfdset, int fd) +{ + int i; + + if (pfdset == NULL || fd == -1) + return -2; + + pthread_mutex_lock(&pfdset->fd_mutex); + i = fdset_find_fd(pfdset, fd); + if (i != -1 && pfdset->fd[i].busy) { + pthread_mutex_unlock(&pfdset->fd_mutex); + return -1; + } + + if (i != -1) { + pfdset->fd[i].fd = -1; + pfdset->fd[i].rcb = pfdset->fd[i].wcb = NULL; + pfdset->fd[i].dat = NULL; + } + + pthread_mutex_unlock(&pfdset->fd_mutex); + return 0; +} /** * This functions runs in infinite blocking loop until there is no fd in @@ -275,7 +307,7 @@ fdset_event_dispatch(struct fdset *pfdset) * because the fd is closed in the cb, * the old fd val could be reused by when creates new * listen fd in another thread, we couldn't call - * fd_set_del. + * fdset_del. 
*/ if (remove1 || remove2) { pfdentry->fd = -1; diff --git a/lib/librte_vhost/fd_man.h b/lib/librte_vhost/fd_man.h index d319cac6..20781c02 100644 --- a/lib/librte_vhost/fd_man.h +++ b/lib/librte_vhost/fd_man.h @@ -63,6 +63,7 @@ int fdset_add(struct fdset *pfdset, int fd, fd_cb rcb, fd_cb wcb, void *dat); void *fdset_del(struct fdset *pfdset, int fd); +int fdset_try_del(struct fdset *pfdset, int fd); void fdset_event_dispatch(struct fdset *pfdset); diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c index a95fbfbb..805b2e5b 100644 --- a/lib/librte_vhost/socket.c +++ b/lib/librte_vhost/socket.c @@ -166,6 +166,11 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num) msgh.msg_control = control; msgh.msg_controllen = sizeof(control); cmsg = CMSG_FIRSTHDR(&msgh); + if (cmsg == NULL) { + RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n"); + errno = EINVAL; + return -1; + } cmsg->cmsg_len = CMSG_LEN(fdsize); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; @@ -598,13 +603,25 @@ rte_vhost_driver_unregister(const char *path) vhost_user_remove_reconnect(vsocket); } +again: pthread_mutex_lock(&vsocket->conn_mutex); for (conn = TAILQ_FIRST(&vsocket->conn_list); conn != NULL; conn = next) { next = TAILQ_NEXT(conn, next); - fdset_del(&vhost_user.fdset, conn->connfd); + /* + * If r/wcb is executing, release the + * conn_mutex lock, and try again since + * the r/wcb may use the conn_mutex lock. + */ + if (fdset_try_del(&vhost_user.fdset, + conn->connfd) == -1) { + pthread_mutex_unlock( + &vsocket->conn_mutex); + goto again; + } + RTE_LOG(INFO, VHOST_CONFIG, "free connfd = %d for device '%s'\n", conn->connfd, path); diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index c49db0c0..abc5a908 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -52,6 +52,8 @@ #define BUF_VECTOR_MAX 256 +#define VHOST_LOG_CACHE_NR 32 + /** * Structure contains buffer address, length and descriptor index * from vring to do scatter RX. @@ -75,6 +77,14 @@ struct zcopy_mbuf { }; TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf); +/* + * Structure that contains the info for batched dirty logging. + */ +struct log_cache_entry { + uint32_t offset; + unsigned long val; +}; + /** * Structure contains variables relevant to RX/TX virtqueues. */ @@ -110,6 +120,9 @@ struct vhost_virtqueue { struct vring_used_elem *shadow_used_ring; uint16_t shadow_used_idx; + + struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR]; + uint16_t log_cache_nb_elem; } __rte_cache_aligned; /* Old kernels have no such macros defined */ @@ -195,7 +208,8 @@ struct virtio_memory { #ifdef RTE_LIBRTE_VHOST_DEBUG #define VHOST_MAX_PRINT_BUFF 6072 #define LOG_LEVEL RTE_LOG_DEBUG -#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args) +#define VHOST_LOG_DEBUG(log_type, fmt, args...) \ + RTE_LOG(DEBUG, log_type, fmt, ##args) #define PRINT_PACKET(device, addr, size, header) do { \ char *pkt_addr = (char *)(addr); \ unsigned int index; \ @@ -211,11 +225,11 @@ struct virtio_memory { } \ snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \ \ - LOG_DEBUG(VHOST_DATA, "%s", packet); \ + VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \ } while (0) #else #define LOG_LEVEL RTE_LOG_INFO -#define LOG_DEBUG(log_type, fmt, args...) do {} while (0) +#define VHOST_LOG_DEBUG(log_type, fmt, args...) 
do {} while (0) #define PRINT_PACKET(device, addr, size, header) do {} while (0) #endif diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c index 550a1329..04c92ceb 100644 --- a/lib/librte_vhost/vhost_user.c +++ b/lib/librte_vhost/vhost_user.c @@ -169,7 +169,7 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features) } else { dev->vhost_hlen = sizeof(struct virtio_net_hdr); } - LOG_DEBUG(VHOST_CONFIG, + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mergeable RX buffers %s, virtio 1 %s\n", dev->vid, (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", @@ -396,13 +396,13 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, vq->log_guest_addr = addr->log_guest_addr; - LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n", dev->vid, vq->desc); - LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n", dev->vid, vq->avail); - LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n", dev->vid, vq->used); - LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n", dev->vid, vq->log_guest_addr); return 0; @@ -421,7 +421,7 @@ vhost_user_set_vring_base(struct virtio_net *dev, return 0; } -static void +static int add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, uint64_t host_phys_addr, uint64_t size) { @@ -431,6 +431,10 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, dev->max_guest_pages *= 2; dev->guest_pages = realloc(dev->guest_pages, dev->max_guest_pages * sizeof(*page)); + if (!dev->guest_pages) { + RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n"); + return -1; + } } if (dev->nr_guest_pages > 0) { @@ -439,7 +443,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, if (host_phys_addr == last_page->host_phys_addr + last_page->size) { last_page->size += size; - return; + return 0; } } @@ -447,9 +451,11 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, page->guest_phys_addr = guest_phys_addr; page->host_phys_addr = host_phys_addr; page->size = size; + + return 0; } -static void +static int add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg, uint64_t page_size) { @@ -463,7 +469,9 @@ add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg, size = page_size - (guest_phys_addr & (page_size - 1)); size = RTE_MIN(size, reg_size); - add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size); + if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0) + return -1; + host_user_addr += size; guest_phys_addr += size; reg_size -= size; @@ -472,12 +480,16 @@ add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg, size = RTE_MIN(reg_size, page_size); host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t) host_user_addr); - add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size); + if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, + size) < 0) + return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; } + + return 0; } #ifdef RTE_LIBRTE_VHOST_DEBUG @@ -624,7 +636,12 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg) mmap_offset; if (dev->dequeue_zero_copy) - add_guest_pages(dev, reg, alignment); + if (add_guest_pages(dev, reg, alignment) < 0) { + RTE_LOG(ERR, 
VHOST_CONFIG, + "adding guest pages to region %u failed.\n", + i); + goto err_mmap; + } RTE_LOG(INFO, VHOST_CONFIG, "guest memory region %u, size: 0x%" PRIx64 "\n" @@ -869,7 +886,7 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg) * mmap from 0 to workaround a hugepage mmap bug: mmap will * fail when offset is not page size aligned. */ - addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); close(fd); if (addr == MAP_FAILED) { RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n"); diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 745cc53f..2932bade 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -58,7 +58,7 @@ static inline void __attribute__((always_inline)) vhost_set_bit(unsigned int nr, volatile uint8_t *addr) { - __sync_fetch_and_or_8(addr, (1U << nr)); + __sync_fetch_and_or_1(addr, (1U << nr)); } static inline void __attribute__((always_inline)) @@ -90,6 +90,93 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len) } static inline void __attribute__((always_inline)) +vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq) +{ + unsigned long *log_base; + int i; + + if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) || + !dev->log_base)) + return; + + log_base = (unsigned long *)(uintptr_t)dev->log_base; + + /* + * It is expected a write memory barrier has been issued + * before this function is called. + */ + + for (i = 0; i < vq->log_cache_nb_elem; i++) { + struct log_cache_entry *elem = vq->log_cache + i; + + __sync_fetch_and_or(log_base + elem->offset, elem->val); + } + + rte_smp_wmb(); + + vq->log_cache_nb_elem = 0; +} + +static inline void __attribute__((always_inline)) +vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t page) +{ + uint32_t bit_nr = page % (sizeof(unsigned long) << 3); + uint32_t offset = page / (sizeof(unsigned long) << 3); + int i; + + for (i = 0; i < vq->log_cache_nb_elem; i++) { + struct log_cache_entry *elem = vq->log_cache + i; + + if (elem->offset == offset) { + elem->val |= (1UL << bit_nr); + return; + } + } + + if (unlikely(i >= VHOST_LOG_CACHE_NR)) { + /* + * No more room for a new log cache entry, + * so write the dirty log map directly. 
+ */ + rte_smp_wmb(); + vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page); + + return; + } + + vq->log_cache[i].offset = offset; + vq->log_cache[i].val = (1UL << bit_nr); +} + +static inline void __attribute__((always_inline)) +vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t addr, uint64_t len) +{ + uint64_t page; + + if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) || + !dev->log_base || !len)) + return; + + if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8))) + return; + + page = addr / VHOST_LOG_PAGE; + while (page * VHOST_LOG_PAGE < addr + len) { + vhost_log_cache_page(dev, vq, page); + page += 1; + } +} + +static inline void __attribute__((always_inline)) +vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t offset, uint64_t len) +{ + vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len); +} + +static inline void __attribute__((always_inline)) vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t offset, uint64_t len) { @@ -147,7 +234,7 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, rte_memcpy(&vq->used->ring[to], &vq->shadow_used_ring[from], size * sizeof(struct vring_used_elem)); - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[to]), size * sizeof(struct vring_used_elem)); } @@ -175,6 +262,8 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq) rte_smp_wmb(); + vhost_log_cache_sync(dev, vq); + *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx; vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx), sizeof(vq->used->idx)); @@ -249,8 +338,9 @@ copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr, } static inline int __attribute__((always_inline)) -copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, - struct rte_mbuf *m, uint16_t desc_idx, uint32_t size) +copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct vring_desc *descs, struct rte_mbuf *m, + uint16_t desc_idx, uint32_t size) { uint32_t desc_avail, desc_offset; uint32_t mbuf_avail, mbuf_offset; @@ -298,14 +388,14 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len); - PRINT_PACKET(dev, (uintptr_t)dst, len, 0); + PRINT_PACKET(dev, (uintptr_t)dst, (uint32_t)len, 0); remain -= len; guest_addr += len; dst += len; } } - vhost_log_write(dev, desc_gaddr, dev->vhost_hlen); + vhost_log_cache_write(dev, vq, desc_gaddr, dev->vhost_hlen); desc_avail = desc->len - dev->vhost_hlen; if (unlikely(desc_chunck_len < dev->vhost_hlen)) { @@ -368,7 +458,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)), rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), cpy_len); - vhost_log_write(dev, desc_gaddr + desc_offset, cpy_len); + vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset, + cpy_len); PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), cpy_len, 0); @@ -401,7 +492,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, uint32_t i, sz; uint64_t dlen; - LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) { RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", dev->vid, __func__, queue_id); @@ -423,7 +514,7 
@@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, if (count == 0) goto out_access_unlock; - LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n", dev->vid, start_idx, start_idx + count); /* Retrieve all of the desc indexes first to avoid caching issues. */ @@ -434,7 +525,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, vq->used->ring[used_idx].id = desc_indexes[i]; vq->used->ring[used_idx].len = pkts[i]->pkt_len + dev->vhost_hlen; - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[used_idx]), sizeof(vq->used->ring[used_idx])); } @@ -474,11 +565,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, sz = vq->size; } - err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz); + err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz); if (unlikely(err)) { used_idx = (start_idx + i) & (vq->size - 1); vq->used->ring[used_idx].len = dev->vhost_hlen; - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[used_idx]), sizeof(vq->used->ring[used_idx])); } @@ -492,6 +583,8 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, rte_smp_wmb(); + vhost_log_cache_sync(dev, vq); + *(volatile uint16_t *)&vq->used->idx += count; vq->last_used_idx += count; vhost_log_used_vring(dev, vq, @@ -623,8 +716,9 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, } static inline int __attribute__((always_inline)) -copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, - struct buf_vector *buf_vec, uint16_t num_buffers) +copy_mbuf_to_desc_mergeable(struct virtio_net *dev, + struct vhost_virtqueue *vq, struct rte_mbuf *m, + struct buf_vector *buf_vec, uint16_t num_buffers) { struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0}; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -657,7 +751,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, rte_prefetch0((void *)(uintptr_t)hdr_addr); virtio_hdr.num_buffers = num_buffers; - LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n", dev->vid, num_buffers); desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen; @@ -736,14 +830,15 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, len); PRINT_PACKET(dev, (uintptr_t)dst, - len, 0); + (uint32_t)len, 0); remain -= len; guest_addr += len; dst += len; } } - vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen); + vhost_log_cache_write(dev, vq, hdr_phys_addr, + dev->vhost_hlen); PRINT_PACKET(dev, (uintptr_t)hdr_addr, dev->vhost_hlen, 0); @@ -754,7 +849,8 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)), rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), cpy_len); - vhost_log_write(dev, desc_gaddr + desc_offset, cpy_len); + vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset, + cpy_len); PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), cpy_len, 0); @@ -778,7 +874,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct buf_vector buf_vec[BUF_VECTOR_MAX]; uint16_t avail_head; - LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) { RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", dev->vid, 
__func__, queue_id); @@ -806,18 +902,18 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, if (unlikely(reserve_avail_buf_mergeable(dev, vq, pkt_len, buf_vec, &num_buffers, avail_head) < 0)) { - LOG_DEBUG(VHOST_DATA, + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) failed to get enough desc from vring\n", dev->vid); vq->shadow_used_idx -= num_buffers; break; } - LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers); - if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx], + if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx], buf_vec, num_buffers) < 0) { vq->shadow_used_idx -= num_buffers; break; @@ -1116,7 +1212,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset)); PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), - desc_chunck_len, 0); + (uint32_t)desc_chunck_len, 0); mbuf_offset = 0; mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM; @@ -1181,7 +1277,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, desc_avail = desc->len; PRINT_PACKET(dev, (uintptr_t)desc_addr, - desc_chunck_len, 0); + (uint32_t)desc_chunck_len, 0); } else if (unlikely(desc_chunck_len == 0)) { desc_chunck_len = desc_avail; desc_gaddr += desc_offset; @@ -1194,7 +1290,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, desc_offset = 0; PRINT_PACKET(dev, (uintptr_t)desc_addr, - desc_chunck_len, 0); + (uint32_t)desc_chunck_len, 0); } /* @@ -1237,7 +1333,7 @@ update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, { vq->used->ring[used_idx].id = desc_idx; vq->used->ring[used_idx].len = 0; - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[used_idx]), sizeof(vq->used->ring[used_idx])); } @@ -1252,6 +1348,8 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq, rte_smp_wmb(); rte_smp_rmb(); + vhost_log_cache_sync(dev, vq); + vq->used->idx += count; vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx), sizeof(vq->used->idx)); @@ -1417,7 +1515,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, if (free_entries == 0) goto out_access_unlock; - LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); /* Prefetch available and used ring */ avail_idx = vq->last_avail_idx & (vq->size - 1); @@ -1427,7 +1525,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, count = RTE_MIN(count, MAX_PKT_BURST); count = RTE_MIN(count, free_entries); - LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", dev->vid, count); /* Retrieve all of the head indexes first to avoid caching issues. */ diff --git a/mk/rte.app.mk b/mk/rte.app.mk index 725e01df..c7e7e7ec 100644 --- a/mk/rte.app.mk +++ b/mk/rte.app.mk @@ -155,6 +155,9 @@ ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),n) # The static libraries do not know their dependencies. # So linking with static library requires explicit dependencies. 
_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lrt +ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP)$(CONFIG_RTE_USE_LIBBSD),yy) +_LDLIBS-$(CONFIG_RTE_LIBRTE_EAL) += -lbsd +endif _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lm _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED) += -lrt _LDLIBS-$(CONFIG_RTE_LIBRTE_METER) += -lm diff --git a/pkg/dpdk.spec b/pkg/dpdk.spec index 3582066b..56c3a899 100644 --- a/pkg/dpdk.spec +++ b/pkg/dpdk.spec @@ -30,7 +30,7 @@ # OF THE POSSIBILITY OF SUCH DAMAGE. Name: dpdk -Version: 16.11.6 +Version: 16.11.7 Release: 1 Packager: packaging@6wind.com URL: http://dpdk.org
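Taken together, the rte_string_fns.h change earlier in this diff and the -lbsd addition above ensure a strlcpy() is always available: libc on BSD, libbsd when RTE_USE_LIBBSD is set, and the snprintf()-based rte_strlcpy() fallback otherwise. This is what lets the strncpy() call in eal_pci.c be replaced by a copy bounded by the destination size. A small stand-alone illustration of the pattern (copy_driver_name is a hypothetical helper with the same semantics, not DPDK code):

    #include <stdio.h>

    /* Always NUL-terminates and returns the full length of src,
     * so the caller can detect truncation. */
    static size_t
    copy_driver_name(char *dst, const char *src, size_t size)
    {
            return (size_t)snprintf(dst, size, "%s", src);
    }

    int main(void)
    {
            char drv_name[16];

            if (copy_driver_name(drv_name, "a-longer-than-expected-name",
                                 sizeof(drv_name)) >= sizeof(drv_name))
                    printf("driver name truncated to \"%s\"\n", drv_name);
            return 0;
    }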
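The vhost changes in this diff also introduce batched dirty-page logging: instead of one atomic OR into the shared log bitmap per guest write, dirty pages are collected in a small per-virtqueue cache (VHOST_LOG_CACHE_NR entries) and flushed with a single atomic OR per bitmap word when the used ring index is published. The sketch below is a simplified, self-contained version of that idea only, assuming GCC's __sync_fetch_and_or() builtin; the type and function names are illustrative and it is not the vhost implementation itself:

    #include <stdint.h>

    #define LOG_CACHE_NR 32                 /* mirrors VHOST_LOG_CACHE_NR */

    struct log_cache_entry {
            uint32_t offset;                /* word index into the log bitmap */
            unsigned long val;              /* dirty bits to OR into that word */
    };

    struct log_cache {
            struct log_cache_entry entry[LOG_CACHE_NR];
            uint16_t nb_elem;
    };

    /* Mark one page dirty in the cache; fall back to a direct atomic OR
     * into the shared bitmap only when the cache is full. */
    static void
    log_cache_mark_page(struct log_cache *c, unsigned long *log, uint64_t page)
    {
            uint32_t bit = page % (sizeof(unsigned long) * 8);
            uint32_t off = page / (sizeof(unsigned long) * 8);
            uint16_t i;

            for (i = 0; i < c->nb_elem; i++) {
                    if (c->entry[i].offset == off) {
                            c->entry[i].val |= 1UL << bit;
                            return;
                    }
            }
            if (i == LOG_CACHE_NR) {
                    __sync_fetch_and_or(log + off, 1UL << bit);
                    return;
            }
            c->entry[i].offset = off;
            c->entry[i].val = 1UL << bit;
            c->nb_elem++;
    }

    /* Flush the cache: one atomic OR per distinct bitmap word. */
    static void
    log_cache_sync(struct log_cache *c, unsigned long *log)
    {
            uint16_t i;

            for (i = 0; i < c->nb_elem; i++)
                    __sync_fetch_and_or(log + c->entry[i].offset,
                                        c->entry[i].val);
            c->nb_elem = 0;
    }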