author    Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:52:30 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:53:17 +0100
commit    b63264c8342e6a1b6971c79550d2af2024b6a4de
tree      83114aac64286fe616506c0b3dfaec2ab86ef835  /drivers/net/e1000
parent    ca33590b6af032bff57d9cc70455660466a654b2
New upstream version 18.08 (tag: upstream/18.08)
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/e1000')
 drivers/net/e1000/Makefile               |   4
 drivers/net/e1000/base/e1000_82575.c     |   5
 drivers/net/e1000/base/e1000_defines.h   |   1
 drivers/net/e1000/base/e1000_phy.h       |   8
 drivers/net/e1000/e1000_ethdev.h         |  27
 drivers/net/e1000/e1000_logs.c           |  26
 drivers/net/e1000/e1000_logs.h           |   6
 drivers/net/e1000/em_ethdev.c            | 146
 drivers/net/e1000/em_rxtx.c              | 113
 drivers/net/e1000/igb_ethdev.c           | 246
 drivers/net/e1000/igb_flow.c             | 110
 drivers/net/e1000/igb_rxtx.c             | 188
 drivers/net/e1000/meson.build            |   1
13 files changed, 606 insertions(+), 275 deletions(-)
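Most of this update migrates the e1000/igb PMDs from the old rte_eth_rxmode bit-field flags (jumbo_frame, hw_ip_checksum, hw_strip_crc, enable_scatter, hw_vlan_*) to the per-port and per-queue DEV_RX_OFFLOAD_*/DEV_TX_OFFLOAD_* offload API. A minimal application-side sketch of how those new-style offloads are requested is shown below; port 0 and single-queue operation are assumed for illustration.

#include <rte_ethdev.h>

static int
configure_port_offloads(uint16_t port_id)
{
	struct rte_eth_dev_info info;
	struct rte_eth_conf conf = { .rxmode = { .offloads = 0 } };
	uint64_t wanted = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_VLAN_STRIP;

	rte_eth_dev_info_get(port_id, &info);

	/* Request only what the PMD reports as supported at port level. */
	conf.rxmode.offloads = wanted & info.rx_offload_capa;

	/* One Rx and one Tx queue, which the em PMD now advertises as its
	 * preferred default via default_rxportconf/default_txportconf. */
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}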
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ba81a1f4..9c87e883 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -22,7 +22,8 @@ ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
 #
 # CFLAGS for icc
 #
-CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259
+CFLAGS_BASE_DRIVER = -diag-disable 177 -diag-disable 181
+CFLAGS_BASE_DRIVER += -diag-disable 869 -diag-disable 2259
 else
 #
 # CFLAGS for gcc/clang
@@ -61,6 +62,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_82575.c
 SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_i210.c
 SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_api.c
 SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_ich8lan.c
+SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_logs.c
 SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mac.c
 SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_manage.c
 SRCS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000_mbx.c
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 15c7dd84..da1a9a70 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -312,6 +312,9 @@ STATIC s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
 		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580;
 		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
 		break;
+	case BCM54616_E_PHY_ID:
+		phy->type = e1000_phy_none;
+		break;
 	default:
 		ret_val = -E1000_ERR_PHY;
 		goto out;
@@ -1607,6 +1610,8 @@ STATIC s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
 	case e1000_phy_82580:
 		ret_val = e1000_copper_link_setup_82577(hw);
 		break;
+	case e1000_phy_none:
+		break;
 	default:
 		ret_val = -E1000_ERR_PHY;
 		break;
diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h
index dbc2bbbe..e2101c17 100644
--- a/drivers/net/e1000/base/e1000_defines.h
+++ b/drivers/net/e1000/base/e1000_defines.h
@@ -1274,6 +1274,7 @@ POSSIBILITY OF SUCH DAMAGE.
 #define I350_I_PHY_ID		0x015403B0
 #define I210_I_PHY_ID		0x01410C00
 #define IGP04E1000_E_PHY_ID	0x02A80391
+#define BCM54616_E_PHY_ID	0x03625D10
 #define M88_VENDOR		0x0141
 
 /* M88E1000 Specific Registers */
diff --git a/drivers/net/e1000/base/e1000_phy.h b/drivers/net/e1000/base/e1000_phy.h
index 3e45a9ef..2cd0e14b 100644
--- a/drivers/net/e1000/base/e1000_phy.h
+++ b/drivers/net/e1000/base/e1000_phy.h
@@ -330,4 +330,12 @@ struct sfp_e1000_flags {
 #define E1000_SFF_VENDOR_OUI_AVAGO	0x00176A00
 #define E1000_SFF_VENDOR_OUI_INTEL	0x001B2100
 
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP	0x5C
+#define IGB_SFF_8472_COMP	0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE	0x4
+#define IGB_SFF_8472_UNSUP	0x00
+
 #endif
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 23b089c8..902001f3 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -4,6 +4,10 @@
 #ifndef _E1000_ETHDEV_H_
 #define _E1000_ETHDEV_H_
+
+#include <stdint.h>
+
+#include <rte_flow.h>
 #include <rte_time.h>
 #include <rte_pci.h>
@@ -27,6 +31,7 @@
 #define E1000_CTRL_EXT_EXTEND_VLAN	(1<<26)	/* EXTENDED VLAN */
 #define IGB_VFTA_SIZE 128
 
+#define IGB_HKEY_MAX_INDEX 10
 #define IGB_MAX_RX_QUEUE_NUM		8
 #define IGB_MAX_RX_QUEUE_NUM_82576	16
@@ -229,8 +234,8 @@ struct igb_ethertype_filter {
 };
 
 struct igb_rte_flow_rss_conf {
-	struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */
-	uint16_t num; /**< Number of entries in queue[]. */
+	struct rte_flow_action_rss conf; /**< RSS parameters. */
+	uint8_t key[IGB_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
 	uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
 };
@@ -357,6 +362,9 @@ void eth_igb_rx_queue_release(void *rxq);
 void igb_dev_clear_queues(struct rte_eth_dev *dev);
 void igb_dev_free_queues(struct rte_eth_dev *dev);
 
+uint64_t igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
@@ -370,6 +378,9 @@ int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
@@ -417,6 +428,8 @@ void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_txq_info *qinfo);
 
+uint32_t em_get_max_pktlen(struct rte_eth_dev *dev);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -426,6 +439,9 @@ void eth_em_rx_queue_release(void *rxq);
 void em_dev_clear_queues(struct rte_eth_dev *dev);
 void em_dev_free_queues(struct rte_eth_dev *dev);
 
+uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
 		const struct rte_eth_rxconf *rx_conf,
@@ -439,6 +455,9 @@ int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
 int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
 int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
 
+uint64_t em_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+
 int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 		uint16_t nb_tx_desc, unsigned int socket_id,
 		const struct rte_eth_txconf *tx_conf);
@@ -487,6 +506,10 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev,
 int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
 			struct rte_eth_flex_filter *filter,
 			bool add);
+int igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+		      const struct rte_flow_action_rss *in);
+int igb_action_rss_same(const struct rte_flow_action_rss *comp,
+			const struct rte_flow_action_rss *with);
 int igb_config_rss_filter(struct rte_eth_dev *dev,
 			struct igb_rte_flow_rss_conf *conf,
 			bool add);
diff --git a/drivers/net/e1000/e1000_logs.c b/drivers/net/e1000/e1000_logs.c
new file mode 100644
index 00000000..22173939
--- /dev/null
+++ b/drivers/net/e1000/e1000_logs.c
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include "e1000_logs.h"
+
+/* declared as extern in e1000_logs.h */
+int e1000_logtype_init;
+int e1000_logtype_driver;
+
+/* avoids double registering of logs if EM and IGB drivers are in use */
+static int e1000_log_initialized;
+
+void
+e1000_igb_init_log(void)
+{
+	if (!e1000_log_initialized) {
+		e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
+		if (e1000_logtype_init >= 0)
+			rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
+		e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
+		if (e1000_logtype_driver >= 0)
+			rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+		e1000_log_initialized = 1;
+	}
+}
diff --git a/drivers/net/e1000/e1000_logs.h b/drivers/net/e1000/e1000_logs.h
index 50348e9e..69d3d311 100644
--- a/drivers/net/e1000/e1000_logs.h
+++ b/drivers/net/e1000/e1000_logs.h
@@ -5,6 +5,8 @@
 #ifndef _E1000_LOGS_H_
 #define _E1000_LOGS_H_
 
+#include <rte_log.h>
+
 extern int e1000_logtype_init;
 #define PMD_INIT_LOG(level, fmt, args...) \
 	rte_log(RTE_LOG_ ## level, e1000_logtype_init, \
@@ -41,4 +43,8 @@ extern int e1000_logtype_driver;
 #define PMD_DRV_LOG(level, fmt, args...) \
 	PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
 
+
+/* log init function shared by e1000 and igb drivers */
+void e1000_igb_init_log(void);
+
 #endif /* _E1000_LOGS_H_ */
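The new e1000_logs.c guards registration with a static flag because both the em and igb PMDs now call the same constructor. A minimal standalone sketch of that guard pattern, with an illustrative (not DPDK-defined) log name and function, is shown here; at runtime the level can then typically be raised with the EAL option --log-level (e.g. --log-level=pmd.net.e1000.driver:debug).

#include <rte_log.h>

/* hypothetical log type, for illustration only */
static int my_logtype;
static int my_log_initialized;

void
my_init_log(void)
{
	/* Safe to call from multiple RTE_INIT constructors: only the
	 * first call registers the type, later calls are no-ops. */
	if (my_log_initialized)
		return;

	my_logtype = rte_log_register("pmd.net.mydrv");
	if (my_logtype >= 0)
		rte_log_set_level(my_logtype, RTE_LOG_NOTICE);
	my_log_initialized = 1;
}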
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index 242375ff..053e855b 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -11,7 +11,6 @@
 #include <rte_common.h>
 #include <rte_interrupts.h>
 #include <rte_byteorder.h>
-#include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
@@ -20,7 +19,6 @@
 #include <rte_ethdev_pci.h>
 #include <rte_memory.h>
 #include <rte_eal.h>
-#include <rte_atomic.h>
 #include <rte_malloc.h>
 #include <rte_dev.h>
@@ -94,6 +92,8 @@ static int em_get_rx_buffer_size(struct e1000_hw *hw);
 static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 		uint32_t index, uint32_t pool);
 static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+					 struct ether_addr *addr);
 
 static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
 				   struct ether_addr *mc_addr_set,
@@ -105,9 +105,6 @@ static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
 
 static enum e1000_fc_mode em_fc_setting = e1000_fc_full;
 
-int e1000_logtype_init;
-int e1000_logtype_driver;
-
 /*
  * The set of PCI devices this driver supports
  */
@@ -190,6 +187,7 @@ static const struct eth_dev_ops eth_em_ops = {
 	.dev_led_off          = eth_em_led_off,
 	.flow_ctrl_get        = eth_em_flow_ctrl_get,
 	.flow_ctrl_set        = eth_em_flow_ctrl_set,
+	.mac_addr_set         = eth_em_default_mac_addr_set,
 	.mac_addr_add         = eth_em_rar_set,
 	.mac_addr_remove      = eth_em_rar_clear,
 	.set_mc_addr_list     = eth_em_set_mc_addr_list,
@@ -197,57 +195,6 @@ static const struct eth_dev_ops eth_em_ops = {
 	.txq_info_get         = em_txq_info_get,
 };
 
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *  - Pointer to the structure rte_eth_dev to read from.
- *  - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *  - On success, zero.
- *  - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *  - Pointer to the structure rte_eth_dev to read from.
- *  - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *  - On success, zero.
- *  - On failure, negative value.
- */
-static inline int
-rte_em_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &(dev->data->dev_link);
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
 
 /**
  * eth_em_dev_is_ich8 - Check for ICH8 device
@@ -506,6 +453,7 @@ eth_em_configure(struct rte_eth_dev *dev)
 	PMD_INIT_FUNC_TRACE();
 	intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
 	PMD_INIT_FUNC_TRACE();
 
 	return 0;
@@ -802,7 +750,7 @@ eth_em_stop(struct rte_eth_dev *dev)
 
 	/* clear the recorded link status */
 	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_write_link_status(dev, &link);
+	rte_eth_linkstatus_set(dev, &link);
 
 	if (!rte_intr_allow_others(intr_handle))
 		/* resume to the default handler */
@@ -1069,9 +1017,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
 	return 0;
 }
 
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
 {
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
@@ -1100,20 +1050,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
-	dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+	dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 	/*
 	 * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1135,6 +1074,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		dev_info->max_rx_queues = 1;
 		dev_info->max_tx_queues = 1;
 
+	dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
+
 	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
 		.nb_max = E1000_MAX_RING_DESC,
 		.nb_min = E1000_MIN_RING_DESC,
@@ -1152,6 +1098,12 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
 			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
 			ETH_LINK_SPEED_1G;
+
+	/* Preferred queue parameters */
+	dev_info->default_rxportconf.nb_queues = 1;
+	dev_info->default_txportconf.nb_queues = 1;
+	dev_info->default_txportconf.ring_size = 256;
+	dev_info->default_rxportconf.ring_size = 256;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1160,7 +1112,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_link link, old;
+	struct rte_eth_link link;
 	int link_check, count;
 
 	link_check = 0;
@@ -1195,8 +1147,6 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		rte_delay_ms(EM_LINK_UPDATE_CHECK_INTERVAL);
 	}
 	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_read_link_status(dev, &link);
-	old = link;
 
 	/* Now we check if a transition has happened */
 	if (link_check && (link.link_status == ETH_LINK_DOWN)) {
@@ -1210,19 +1160,13 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
 				ETH_LINK_SPEED_FIXED);
 	} else if (!link_check && (link.link_status == ETH_LINK_UP)) {
-		link.link_speed = 0;
+		link.link_speed = ETH_SPEED_NUM_NONE;
 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 		link.link_status = ETH_LINK_DOWN;
 		link.link_autoneg = ETH_LINK_FIXED;
 	}
-	rte_em_dev_atomic_write_link_status(dev, &link);
-
-	/* not changed */
-	if (old.link_status == link.link_status)
-		return -1;
-
-	/* changed */
-	return 0;
+
+	return rte_eth_linkstatus_set(dev, &link);
 }
@@ -1460,15 +1404,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			em_vlan_hw_strip_enable(dev);
 		else
 			em_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			em_vlan_hw_filter_enable(dev);
 		else
 			em_vlan_hw_filter_disable(dev);
@@ -1631,8 +1578,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev,
 	if (ret < 0)
 		return 0;
 
-	memset(&link, 0, sizeof(link));
-	rte_em_dev_atomic_read_link_status(dev, &link);
+	rte_eth_linkstatus_get(dev, &link);
+
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO, " Port %d: Link Up - speed %u Mbps - %s",
 			     dev->data->port_id, link.link_speed,
@@ -1810,6 +1757,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 }
 
 static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+			    struct ether_addr *addr)
+{
+	eth_em_rar_clear(dev, 0);
+
+	return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
+static int
 eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct rte_eth_dev_info dev_info;
@@ -1835,10 +1791,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -1864,14 +1822,8 @@ RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio-pci");
 
-RTE_INIT(e1000_init_log);
-static void
-e1000_init_log(void)
+/* see e1000_logs.c */
+RTE_INIT(igb_init_log)
 {
-	e1000_logtype_init = rte_log_register("pmd.net.e1000.init");
-	if (e1000_logtype_init >= 0)
-		rte_log_set_level(e1000_logtype_init, RTE_LOG_NOTICE);
-	e1000_logtype_driver = rte_log_register("pmd.net.e1000.driver");
-	if (e1000_logtype_driver >= 0)
-		rte_log_set_level(e1000_logtype_driver, RTE_LOG_NOTICE);
+	e1000_igb_init_log();
}
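The em_ethdev.c changes drop the hand-rolled rte_atomic64_cmpset link-status wrappers in favour of the rte_eth_linkstatus_set()/rte_eth_linkstatus_get() helpers from rte_ethdev_driver.h. A hedged sketch of the resulting driver-side pattern follows; the link values are placeholders standing in for whatever the hardware reports.

#include <string.h>
#include <rte_ethdev_driver.h>

/* Sketch of the pattern adopted in eth_em_link_update(): build the new
 * link state locally, then publish it with one atomic helper. */
static int
my_link_update(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	link.link_status  = ETH_LINK_UP;	/* as read from hardware */
	link.link_speed   = ETH_SPEED_NUM_1G;
	link.link_duplex  = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	/* Returns 0 when the recorded status changed and -1 when it did
	 * not, matching the link_update callback contract the removed
	 * read/compare/write sequence used to implement by hand. */
	return rte_eth_linkstatus_set(dev, &link);
}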
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 02fae100..7d2ac4eb 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -85,6 +85,7 @@ struct em_rx_queue {
 	struct em_rx_entry *sw_ring;   /**< address of RX software ring. */
 	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
 	struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+	uint64_t	    offloads;   /**< Offloads of DEV_RX_OFFLOAD_* */
 	uint16_t            nb_rx_desc; /**< number of RX descriptors. */
 	uint16_t            rx_tail;    /**< current value of RDT register. */
 	uint16_t            nb_rx_hold; /**< number of held free RX desc. */
@@ -163,6 +164,7 @@ struct em_tx_queue {
 	uint8_t                wthresh;  /**< Write-back threshold register. */
 	struct em_ctx_info ctx_cache;
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1151,6 +1153,36 @@ em_reset_tx_queue(struct em_tx_queue *txq)
 	memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
 }
 
+uint64_t
+em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_offload_capa;
+
+	RTE_SET_USED(dev);
+	tx_offload_capa =
+		DEV_TX_OFFLOAD_VLAN_INSERT |
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
+
+	return tx_offload_capa;
+}
+
+uint64_t
+em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t tx_queue_offload_capa;
+
+	/*
+	 * As only one Tx queue can be used, let per queue offloading
+	 * capability be same to per port queue offloading capability
+	 * for better convenience.
+	 */
+	tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
+
+	return tx_queue_offload_capa;
+}
+
 int
 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1163,9 +1195,12 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	struct e1000_hw     *hw;
 	uint32_t tsize;
 	uint16_t tx_rs_thresh, tx_free_thresh;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
 	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and must be multiple
@@ -1269,6 +1304,7 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	em_reset_tx_queue(txq);
 
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = offloads;
 	return 0;
 }
@@ -1313,6 +1349,44 @@ em_reset_rx_queue(struct em_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+uint64_t
+em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_offload_capa;
+	uint32_t max_rx_pktlen;
+
+	max_rx_pktlen = em_get_max_pktlen(dev);
+
+	rx_offload_capa =
+		DEV_RX_OFFLOAD_VLAN_STRIP |
+		DEV_RX_OFFLOAD_VLAN_FILTER |
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_CRC_STRIP |
+		DEV_RX_OFFLOAD_KEEP_CRC |
+		DEV_RX_OFFLOAD_SCATTER;
+	if (max_rx_pktlen > ETHER_MAX_LEN)
+		rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	return rx_offload_capa;
+}
+
+uint64_t
+em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_queue_offload_capa;
+
+	/*
+	 * As only one Rx queue can be used, let per queue offloading
+	 * capability be same to per port queue offloading capability
+	 * for better convenience.
+	 */
+	rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+
+	return rx_queue_offload_capa;
+}
+
 int
 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		uint16_t queue_idx,
@@ -1325,9 +1399,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	struct em_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	uint32_t rsize;
+	uint64_t offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be multiple
@@ -1382,8 +1459,10 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-				0 : ETHER_CRC_LEN);
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+		rxq->crc_len = ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
@@ -1395,6 +1474,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
+	rxq->offloads = offloads;
 
 	return 0;
 }
@@ -1646,6 +1726,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 {
 	struct e1000_hw *hw;
 	struct em_rx_queue *rxq;
+	struct rte_eth_rxmode *rxmode;
 	uint32_t rctl;
 	uint32_t rfctl;
 	uint32_t rxcsum;
@@ -1654,6 +1735,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	int ret;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	rxmode = &dev->data->dev_conf.rxmode;
 
 	/*
	 * Make sure receives are disabled while setting
@@ -1713,9 +1795,10 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+		if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+			rxq->crc_len = ETHER_CRC_LEN;
+		else
+			rxq->crc_len = 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(i),
@@ -1745,7 +1828,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 			 * to avoid splitting packets that don't fit into
 			 * one buffer.
 			 */
-			if (dev->data->dev_conf.rxmode.jumbo_frame ||
+			if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
 					rctl_bsize < ETHER_MAX_LEN) {
 				if (!dev->data->scattered_rx)
 					PMD_INIT_LOG(DEBUG, "forcing scatter mode");
@@ -1755,7 +1838,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
@@ -1768,7 +1851,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
 
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
 		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
 		rxcsum &= ~E1000_RXCSUM_IPOFL;
@@ -1780,24 +1863,24 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	if ((hw->mac.type == e1000_ich9lan ||
 			hw->mac.type == e1000_pch2lan ||
 			hw->mac.type == e1000_ich10lan) &&
-			dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+			rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
 		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
 		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
 	}
 
 	if (hw->mac.type == e1000_pch2lan) {
-		if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
 		else
 			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
 	}
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc)
-		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
-	else
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
 		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+	else
+		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
 	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
 	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
@@ -1814,7 +1897,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	/*
	 * Configure support of jumbo frames, if any.
	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		rctl |= E1000_RCTL_LPE;
 	else
 		rctl &= ~E1000_RCTL_LPE;
@@ -1894,6 +1977,7 @@ em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_rx_desc;
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -1911,4 +1995,5 @@ em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
 	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
 	qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+	qinfo->conf.offloads = txq->offloads;
 }
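The queue-setup changes above fold per-queue and per-port offload requests together with offloads = rx_conf->offloads | dev_conf.rxmode.offloads. The application-side counterpart is sketched below under stated assumptions (port and mempool already exist, ring size 256 chosen arbitrarily): a queue-local offload is requested only if the PMD lists it in rx_queue_offload_capa.

#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_rx_queue_with_scatter(uint16_t port_id, uint16_t queue_id,
			    struct rte_mempool *mp)
{
	struct rte_eth_dev_info info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &info);
	rxconf = info.default_rxconf;

	/* Queue-local request; must be a subset of rx_queue_offload_capa. */
	if (info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
		rxconf.offloads |= DEV_RX_OFFLOAD_SCATTER;

	return rte_eth_rx_queue_setup(port_id, queue_id, 256,
				      rte_socket_id(), &rxconf, mp);
}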
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 3c5138de..64dfe683 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -20,7 +20,6 @@
 #include <rte_ethdev_pci.h>
 #include <rte_memory.h>
 #include <rte_eal.h>
-#include <rte_atomic.h>
 #include <rte_malloc.h>
 #include <rte_dev.h>
@@ -42,8 +41,6 @@
 #define IGB_DEFAULT_TX_HTHRESH      1
 #define IGB_DEFAULT_TX_WTHRESH      ((hw->mac.type == e1000_82576) ? 1 : 16)
 
-#define IGB_HKEY_MAX_INDEX 10
-
 /* Bit shift and mask */
 #define IGB_4_BIT_WIDTH  (CHAR_BIT / 2)
 #define IGB_4_BIT_MASK   RTE_LEN2MASK(IGB_4_BIT_WIDTH, uint8_t)
@@ -146,7 +143,7 @@ static int eth_igb_rar_set(struct rte_eth_dev *dev,
 		struct ether_addr *mac_addr,
 		uint32_t index, uint32_t pool);
 static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
-static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
+static int eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
 					   struct ether_addr *addr);
 
 static void igbvf_intr_disable(struct e1000_hw *hw);
@@ -171,7 +168,7 @@ static int igbvf_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static int igbvf_set_vfta(struct e1000_hw *hw, uint16_t vid, bool on);
 static void igbvf_set_vfta_all(struct rte_eth_dev *dev, bool on);
-static void igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
+static int igbvf_default_mac_addr_set(struct rte_eth_dev *dev,
 					struct ether_addr *addr);
 static int igbvf_get_reg_length(struct rte_eth_dev *dev);
 static int igbvf_get_regs(struct rte_eth_dev *dev,
@@ -224,6 +221,10 @@ static int eth_igb_get_eeprom(struct rte_eth_dev *dev,
 		struct rte_dev_eeprom_info *eeprom);
 static int eth_igb_set_eeprom(struct rte_eth_dev *dev,
 		struct rte_dev_eeprom_info *eeprom);
+static int eth_igb_get_module_info(struct rte_eth_dev *dev,
+				   struct rte_eth_dev_module_info *modinfo);
+static int eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+				     struct rte_dev_eeprom_info *info);
 static int eth_igb_set_mc_addr_list(struct rte_eth_dev *dev,
 				    struct ether_addr *mc_addr_set,
 				    uint32_t nb_mc_addr);
@@ -403,6 +404,8 @@ static const struct eth_dev_ops eth_igb_ops = {
 	.get_eeprom_length    = eth_igb_get_eeprom_length,
 	.get_eeprom           = eth_igb_get_eeprom,
 	.set_eeprom           = eth_igb_set_eeprom,
+	.get_module_info      = eth_igb_get_module_info,
+	.get_module_eeprom    = eth_igb_get_module_eeprom,
 	.timesync_adjust_time = igb_timesync_adjust_time,
 	.timesync_read_time   = igb_timesync_read_time,
 	.timesync_write_time  = igb_timesync_write_time,
@@ -432,6 +435,9 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
 	.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
 	.rx_queue_setup       = eth_igb_rx_queue_setup,
 	.rx_queue_release     = eth_igb_rx_queue_release,
+	.rx_descriptor_done   = eth_igb_rx_descriptor_done,
+	.rx_descriptor_status = eth_igb_rx_descriptor_status,
+	.tx_descriptor_status = eth_igb_tx_descriptor_status,
 	.tx_queue_setup       = eth_igb_tx_queue_setup,
 	.tx_queue_release     = eth_igb_tx_queue_release,
 	.set_mc_addr_list     = eth_igb_set_mc_addr_list,
@@ -522,57 +528,6 @@ static const struct rte_igb_xstats_name_off rte_igbvf_stats_strings[] = {
 #define IGBVF_NB_XSTATS (sizeof(rte_igbvf_stats_strings) / \
 		sizeof(rte_igbvf_stats_strings[0]))
 
-/**
- * Atomically reads the link status information from global
- * structure rte_eth_dev.
- *
- * @param dev
- *  - Pointer to the structure rte_eth_dev to read from.
- *  - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *  - On success, zero.
- *  - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = link;
-	struct rte_eth_link *src = &(dev->data->dev_link);
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param dev
- *  - Pointer to the structure rte_eth_dev to read from.
- *  - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *  - On success, zero.
- *  - On failure, negative value.
- */
-static inline int
-rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
-				struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &(dev->data->dev_link);
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-					*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
 
 static inline void
 igb_intr_enable(struct rte_eth_dev *dev)
@@ -1559,7 +1514,7 @@ eth_igb_stop(struct rte_eth_dev *dev)
 
 	/* clear the recorded link status */
 	memset(&link, 0, sizeof(link));
-	rte_igb_dev_atomic_write_link_status(dev, &link);
+	rte_eth_linkstatus_set(dev, &link);
 
 	if (!rte_intr_allow_others(intr_handle))
 		/* resume to the default handler */
@@ -1635,7 +1590,7 @@ eth_igb_close(struct rte_eth_dev *dev)
 	}
 
 	memset(&link, 0, sizeof(link));
-	rte_igb_dev_atomic_write_link_status(dev, &link);
+	rte_eth_linkstatus_set(dev, &link);
 }
 
 static int
@@ -2196,22 +2151,15 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_VLAN_STRIP |
-		DEV_RX_OFFLOAD_IPV4_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_CKSUM;
-	dev_info->tx_offload_capa =
-		DEV_TX_OFFLOAD_VLAN_INSERT |
-		DEV_TX_OFFLOAD_IPV4_CKSUM |
-		DEV_TX_OFFLOAD_UDP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_CKSUM |
-		DEV_TX_OFFLOAD_SCTP_CKSUM |
-		DEV_TX_OFFLOAD_TCP_TSO;
+	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -2274,6 +2222,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2282,7 +2231,7 @@ eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.hthresh = IGB_DEFAULT_TX_HTHRESH,
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
-		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -2325,14 +2274,9 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
 	dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
 	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-				DEV_RX_OFFLOAD_IPV4_CKSUM |
-				DEV_RX_OFFLOAD_UDP_CKSUM |
-				DEV_RX_OFFLOAD_TCP_CKSUM;
 	dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
 				DEV_TX_OFFLOAD_IPV4_CKSUM |
 				DEV_TX_OFFLOAD_UDP_CKSUM |
@@ -2353,6 +2297,13 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		break;
 	}
 
+	dev_info->rx_queue_offload_capa = igb_get_rx_queue_offloads_capa(dev);
+	dev_info->rx_offload_capa = igb_get_rx_port_offloads_capa(dev) |
+				    dev_info->rx_queue_offload_capa;
+	dev_info->tx_queue_offload_capa = igb_get_tx_queue_offloads_capa(dev);
+	dev_info->tx_offload_capa = igb_get_tx_port_offloads_capa(dev) |
+				    dev_info->tx_queue_offload_capa;
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IGB_DEFAULT_RX_PTHRESH,
@@ -2361,6 +2312,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 		},
 		.rx_free_thresh = IGB_DEFAULT_RX_FREE_THRESH,
 		.rx_drop_en = 0,
+		.offloads = 0,
 	};
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2369,7 +2321,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 			.hthresh = IGB_DEFAULT_TX_HTHRESH,
 			.wthresh = IGB_DEFAULT_TX_WTHRESH,
 		},
-		.txq_flags = 0,
+		.offloads = 0,
 	};
 
 	dev_info->rx_desc_lim = rx_desc_lim;
@@ -2382,7 +2334,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 {
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct rte_eth_link link, old;
+	struct rte_eth_link link;
 	int link_check, count;
 
 	link_check = 0;
@@ -2423,8 +2375,6 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
 	}
 	memset(&link, 0, sizeof(link));
-	rte_igb_dev_atomic_read_link_status(dev, &link);
-	old = link;
 
 	/* Now we check if a transition has happened */
 	if (link_check) {
@@ -2443,14 +2393,8 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 		link.link_status = ETH_LINK_DOWN;
 		link.link_autoneg = ETH_LINK_FIXED;
 	}
-	rte_igb_dev_atomic_write_link_status(dev, &link);
 
-	/* not changed */
-	if (old.link_status == link.link_status)
-		return -1;
-
-	/* changed */
-	return 0;
+	return rte_eth_linkstatus_set(dev, &link);
 }
 
 /*
@@ -2704,7 +2648,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						VLAN_TAG_SIZE);
@@ -2723,7 +2667,7 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
 	/* Update maximum packet length */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
 		E1000_WRITE_REG(hw, E1000_RLPML,
 			dev->data->dev_conf.rxmode.max_rx_pkt_len +
 						2 * VLAN_TAG_SIZE);
@@ -2732,22 +2676,25 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 static int
 eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+	struct rte_eth_rxmode *rxmode;
+
+	rxmode = &dev->data->dev_conf.rxmode;
 	if(mask & ETH_VLAN_STRIP_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 			igb_vlan_hw_strip_enable(dev);
 		else
 			igb_vlan_hw_strip_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_FILTER_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			igb_vlan_hw_filter_enable(dev);
 		else
 			igb_vlan_hw_filter_disable(dev);
 	}
 
 	if(mask & ETH_VLAN_EXTEND_MASK){
-		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			igb_vlan_hw_extend_enable(dev);
 		else
 			igb_vlan_hw_extend_disable(dev);
@@ -2887,8 +2834,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev,
 	if (ret < 0)
 		return 0;
 
-	memset(&link, 0, sizeof(link));
-	rte_igb_dev_atomic_read_link_status(dev, &link);
+	rte_eth_linkstatus_get(dev, &link);
 
 	if (link.link_status) {
 		PMD_INIT_LOG(INFO,
 			" Port %d: Link Up - speed %u Mbps - %s",
@@ -3146,13 +3092,14 @@ eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
 	e1000_rar_set(hw, addr, index);
 }
 
-static void
+static int
 eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
 				struct ether_addr *addr)
 {
 	eth_igb_rar_clear(dev, 0);
-
 	eth_igb_rar_set(dev, (void *)addr, 0, 0);
+
+	return 0;
 }
 
 /*
  * Virtual Function operations
@@ -3250,14 +3197,14 @@ igbvf_dev_configure(struct rte_eth_dev *dev)
 	 * Keep the persistent behavior the same as Host PF
 	 */
 #ifndef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC
-	if (!conf->rxmode.hw_strip_crc) {
+	if (rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
 		PMD_INIT_LOG(NOTICE, "VF can't disable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 1;
+		conf->rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #else
-	if (conf->rxmode.hw_strip_crc) {
+	if (!rte_eth_dev_must_keep_crc(conf->rxmode.offloads)) {
 		PMD_INIT_LOG(NOTICE, "VF can't enable HW CRC Strip");
-		conf->rxmode.hw_strip_crc = 0;
+		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP;
 	}
 #endif
 
@@ -3504,7 +3451,7 @@ igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	return 0;
 }
 
-static void
+static int
 igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
 {
 	struct e1000_hw *hw =
@@ -3512,6 +3459,7 @@ igbvf_default_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *addr)
 
 	/* index is not used by rar_set() */
 	hw->mac.ops.rar_set(hw, (void *)addr, 0);
+	return 0;
 }
 
@@ -4499,10 +4447,12 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
 	/* switch to jumbo mode if needed */
 	if (frame_size > ETHER_MAX_LEN) {
-		dev->data->dev_conf.rxmode.jumbo_frame = 1;
+		dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl |= E1000_RCTL_LPE;
 	} else {
-		dev->data->dev_conf.rxmode.jumbo_frame = 0;
+		dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
 		rctl &= ~E1000_RCTL_LPE;
 	}
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
@@ -5384,6 +5334,86 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
 }
 
 static int
+eth_igb_get_module_info(struct rte_eth_dev *dev,
+			struct rte_eth_dev_module_info *modinfo)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	uint32_t status = 0;
+	uint16_t sff8472_rev, addr_mode;
+	bool page_swap = false;
+
+	if (hw->phy.media_type == e1000_media_type_copper ||
+	    hw->phy.media_type == e1000_media_type_unknown)
+		return -EOPNOTSUPP;
+
+	/* Check whether we support SFF-8472 or not */
+	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
+	if (status)
+		return -EIO;
+
+	/* addressing mode is not supported */
+	status = e1000_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
+	if (status)
+		return -EIO;
+
+	/* addressing mode is not supported */
+	if ((addr_mode & 0xFF) & IGB_SFF_ADDRESSING_MODE) {
+		PMD_DRV_LOG(ERR,
+			    "Address change required to access page 0xA2, "
+			    "but not supported. Please report the module "
+			    "type to the driver maintainers.\n");
+		page_swap = true;
+	}
+
+	if ((sff8472_rev & 0xFF) == IGB_SFF_8472_UNSUP || page_swap) {
+		/* We have an SFP, but it does not support SFF-8472 */
+		modinfo->type = RTE_ETH_MODULE_SFF_8079;
+		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+	} else {
+		/* We have an SFP which supports a revision of SFF-8472 */
+		modinfo->type = RTE_ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+	}
+
+	return 0;
+}
+
+static int
+eth_igb_get_module_eeprom(struct rte_eth_dev *dev,
+			  struct rte_dev_eeprom_info *info)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	uint32_t status = 0;
+	uint16_t dataword[RTE_ETH_MODULE_SFF_8472_LEN / 2 + 1];
+	u16 first_word, last_word;
+	int i = 0;
+
+	if (info->length == 0)
+		return -EINVAL;
+
+	first_word = info->offset >> 1;
+	last_word = (info->offset + info->length - 1) >> 1;
+
+	/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
+	for (i = 0; i < last_word - first_word + 1; i++) {
+		status = e1000_read_phy_reg_i2c(hw, (first_word + i) * 2,
+						&dataword[i]);
+		if (status) {
+			/* Error occurred while reading module */
+			return -EIO;
+		}
+
+		dataword[i] = rte_be_to_cpu_16(dataword[i]);
+	}
+
+	memcpy(info->data, (u8 *)dataword + (info->offset & 1), info->length);
+
+	return 0;
+}
+
+static int
 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
 	struct e1000_hw *hw =
@@ -5631,7 +5661,7 @@ igb_rss_filter_restore(struct rte_eth_dev *dev)
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 
-	if (filter_info->rss_info.num)
+	if (filter_info->rss_info.conf.queue_num)
 		igb_config_rss_filter(dev, &filter_info->rss_info, TRUE);
 }
 
@@ -5654,3 +5684,9 @@ RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio-pci");
+
+/* see e1000_logs.c */
+RTE_INIT(e1000_init_log)
+{
+	e1000_igb_init_log();
+}
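The two new dev_ops above (get_module_info / get_module_eeprom) are reached through the public ethdev API, which was still marked experimental in this DPDK release. A hedged application-side sketch follows; error handling and buffer sizing are the caller's responsibility.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

static int
dump_sfp_eeprom(uint16_t port_id)
{
	struct rte_eth_dev_module_info modinfo;
	struct rte_dev_eeprom_info eeprom;
	int ret;

	ret = rte_eth_dev_get_module_info(port_id, &modinfo);
	if (ret != 0)
		return ret;	/* e.g. -EOPNOTSUPP on copper PHYs */

	eeprom.offset = 0;
	eeprom.length = modinfo.eeprom_len;
	eeprom.data = calloc(1, modinfo.eeprom_len);
	if (eeprom.data == NULL)
		return -ENOMEM;

	ret = rte_eth_dev_get_module_eeprom(port_id, &eeprom);
	if (ret == 0)
		printf("module type %u, %u EEPROM bytes read\n",
		       modinfo.type, eeprom.length);
	free(eeprom.data);
	return ret;
}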
diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c
index a1427596..07385291 100644
--- a/drivers/net/e1000/igb_flow.c
+++ b/drivers/net/e1000/igb_flow.c
@@ -175,7 +175,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			return -rte_errno;
 		}
 
-		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+		ipv4_mask = item->mask;
 		/**
 		 * Only support src & dst addresses, protocol,
 		 * others should be masked.
@@ -198,7 +198,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
 		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+		ipv4_spec = item->spec;
 		filter->dst_ip = ipv4_spec->hdr.dst_addr;
 		filter->src_ip = ipv4_spec->hdr.src_addr;
 		filter->proto  = ipv4_spec->hdr.next_proto_id;
@@ -228,7 +228,7 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 	/* get the TCP/UDP/SCTP info */
 	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
 		if (item->spec && item->mask) {
-			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+			tcp_mask = item->mask;
 
 			/**
 			 * Only support src & dst ports, tcp flags,
@@ -263,14 +263,14 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 				return -rte_errno;
 			}
 
-			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+			tcp_spec = item->spec;
 			filter->dst_port  = tcp_spec->hdr.dst_port;
 			filter->src_port  = tcp_spec->hdr.src_port;
 			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
 		}
 	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
 		if (item->spec && item->mask) {
-			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+			udp_mask = item->mask;
 
 			/**
 			 * Only support src & dst ports,
@@ -289,14 +289,13 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 			filter->dst_port_mask = udp_mask->hdr.dst_port;
 			filter->src_port_mask = udp_mask->hdr.src_port;
 
-			udp_spec = (const struct rte_flow_item_udp *)item->spec;
+			udp_spec = item->spec;
 			filter->dst_port = udp_spec->hdr.dst_port;
 			filter->src_port = udp_spec->hdr.src_port;
 		}
 	} else {
 		if (item->spec && item->mask) {
-			sctp_mask = (const struct rte_flow_item_sctp *)
-					item->mask;
+			sctp_mask = item->mask;
 
 			/**
 			 * Only support src & dst ports,
@@ -380,6 +379,15 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
 		rte_flow_error_set(error, EINVAL,
@@ -533,8 +541,8 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	eth_spec = (const struct rte_flow_item_eth *)item->spec;
-	eth_mask = (const struct rte_flow_item_eth *)item->mask;
+	eth_spec = item->spec;
+	eth_mask = item->mask;
 
 	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
@@ -625,6 +633,14 @@ cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
 	}
 
 	/* Not supported */
+	if (attr->transfer) {
+		rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
+	/* Not supported */
 	if (attr->priority) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
@@ -848,8 +864,8 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
-	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+	tcp_spec = item->spec;
+	tcp_mask = item->mask;
 	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
 	    tcp_mask->hdr.src_port ||
 	    tcp_mask->hdr.dst_port ||
@@ -924,6 +940,15 @@ cons_parse_syn_filter(const struct rte_flow_attr *attr,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	/* Support 2 priorities, the lowest or highest. */
 	if (!attr->priority) {
 		filter->hig_pri = 0;
@@ -1065,8 +1090,8 @@ item_loop:
 		return -rte_errno;
 	}
 
-	raw_spec = (const struct rte_flow_item_raw *)item->spec;
-	raw_mask = (const struct rte_flow_item_raw *)item->mask;
+	raw_spec = item->spec;
+	raw_mask = item->mask;
 
 	if (!raw_mask->length ||
 	    !raw_mask->relative) {
@@ -1212,6 +1237,15 @@ item_loop:
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(filter, 0, sizeof(struct rte_eth_flex_filter));
 		rte_flow_error_set(error, EINVAL,
@@ -1293,7 +1327,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 
 	rss = (const struct rte_flow_action_rss *)act->conf;
 
-	if (!rss || !rss->num) {
+	if (!rss || !rss->queue_num) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ACTION,
 				act,
@@ -1301,7 +1335,7 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
-	for (n = 0; n < rss->num; n++) {
+	for (n = 0; n < rss->queue_num; n++) {
 		if (rss->queue[n] >= dev->data->nb_rx_queues) {
 			rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ACTION,
@@ -1311,14 +1345,26 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 		}
 	}
 
-	if (rss->rss_conf)
-		rss_conf->rss_conf = *rss->rss_conf;
-	else
-		rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
-
-	for (n = 0; n < rss->num; ++n)
-		rss_conf->queue[n] = rss->queue[n];
-	rss_conf->num = rss->num;
+	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "non-default RSS hash functions are not supported");
+	if (rss->level)
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS hash key must be exactly 40 bytes");
+	if (rss->queue_num > RTE_DIM(rss_conf->queue))
+		return rte_flow_error_set
+			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "too many queues for RSS context");
+	if (igb_rss_conf_init(rss_conf, rss))
+		return rte_flow_error_set
+			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+			 "RSS context initialization failure");
 
 	/* check if the next not void item is END */
 	index++;
@@ -1350,6 +1396,15 @@ igb_parse_rss_filter(struct rte_eth_dev *dev,
 		return -rte_errno;
 	}
 
+	/* not supported */
+	if (attr->transfer) {
+		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+				   attr, "No support for transfer.");
+		return -rte_errno;
+	}
+
 	if (attr->priority > 0xFFFF) {
 		memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
 		rte_flow_error_set(error, EINVAL,
@@ -1519,9 +1574,8 @@ igb_flow_create(struct rte_eth_dev *dev,
 			PMD_DRV_LOG(ERR, "failed to allocate memory");
 			goto out;
 		}
-		rte_memcpy(&rss_filter_ptr->filter_info,
-			&rss_conf,
-			sizeof(struct igb_rte_flow_rss_conf));
+		igb_rss_conf_init(&rss_filter_ptr->filter_info,
+				  &rss_conf.conf);
 		TAILQ_INSERT_TAIL(&igb_filter_rss_list,
 				rss_filter_ptr, entries);
 		flow->rule = rss_filter_ptr;
@@ -1758,7 +1812,7 @@ igb_clear_rss_filter(struct rte_eth_dev *dev)
 	struct e1000_filter_info *filter =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 
-	if (filter->rss_info.num)
+	if (filter->rss_info.conf.queue_num)
 		igb_config_rss_filter(dev, &filter->rss_info, FALSE);
 }
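igb_parse_rss_filter() now validates the generic rte_flow_action_rss introduced in DPDK 18.05 instead of the old embedded rte_eth_rss_conf. A sketch of an action that passes those checks is shown below (default hash function, level 0, a 40-byte key, at most 8 queues); the zeroed key and the queue list are placeholders.

#include <rte_ethdev.h>
#include <rte_flow.h>

static const uint8_t rss_key[40]; /* 40-byte hash key, all zero here */
static const uint16_t rss_queues[] = { 0, 1 };

static const struct rte_flow_action_rss rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,			/* outermost headers only */
	.types = ETH_RSS_IP,
	.key_len = sizeof(rss_key),	/* must be exactly 40 bytes */
	.key = rss_key,
	.queue_num = 2,			/* <= IGB_MAX_RX_QUEUE_NUM (8) */
	.queue = rss_queues,
};

static const struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_action },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};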
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 2f371672..b955068a 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -107,6 +107,7 @@ struct igb_rx_queue {
 	uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
 	uint8_t             drop_en;  /**< If not 0, set SRRCTL.Drop_En. */
 	uint32_t            flags;      /**< RX flags. */
+	uint64_t	    offloads;   /**< offloads of DEV_RX_OFFLOAD_* */
 };
 
 /**
@@ -180,6 +181,7 @@ struct igb_tx_queue {
 	/**< Start context position for transmit queue. */
 	struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
 	/**< Hardware context history.*/
+	uint64_t	       offloads; /**< offloads of DEV_TX_OFFLOAD_* */
 };
 
 #if 1
@@ -1447,6 +1449,33 @@ igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
 	igb_reset_tx_queue_stat(txq);
 }
 
+uint64_t
+igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_offload_capa;
+
+	RTE_SET_USED(dev);
+	rx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
+			  DEV_TX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_TX_OFFLOAD_UDP_CKSUM   |
+			  DEV_TX_OFFLOAD_TCP_CKSUM   |
+			  DEV_TX_OFFLOAD_SCTP_CKSUM  |
+			  DEV_TX_OFFLOAD_TCP_TSO     |
+			  DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	return rx_offload_capa;
+}
+
+uint64_t
+igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_queue_offload_capa;
+
+	rx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
+
+	return rx_queue_offload_capa;
+}
+
 int
 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1458,6 +1487,9 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_tx_queue *txq;
 	struct e1000_hw     *hw;
 	uint32_t size;
+	uint64_t offloads;
+
+	offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1542,6 +1574,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
 	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
+	txq->offloads = offloads;
 
 	return 0;
 }
@@ -1593,6 +1626,46 @@ igb_reset_rx_queue(struct igb_rx_queue *rxq)
 	rxq->pkt_last_seg = NULL;
 }
 
+uint64_t
+igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+	uint64_t rx_offload_capa;
+
+	RTE_SET_USED(dev);
+	rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP  |
+			  DEV_RX_OFFLOAD_VLAN_FILTER |
+			  DEV_RX_OFFLOAD_IPV4_CKSUM  |
+			  DEV_RX_OFFLOAD_UDP_CKSUM   |
+			  DEV_RX_OFFLOAD_TCP_CKSUM   |
+			  DEV_RX_OFFLOAD_JUMBO_FRAME |
+			  DEV_RX_OFFLOAD_CRC_STRIP   |
+			  DEV_RX_OFFLOAD_KEEP_CRC    |
+			  DEV_RX_OFFLOAD_SCATTER;
+
+	return rx_offload_capa;
+}
+
+uint64_t
+igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint64_t rx_queue_offload_capa;
+
+	switch (hw->mac.type) {
+	case e1000_vfadapt_i350:
+		/*
		 * As only one Rx queue can be used, let per queue offloading
		 * capability be same to per port queue offloading capability
		 * for better convenience.
		 */
+		rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
+		break;
+	default:
+		rx_queue_offload_capa = 0;
+	}
+
+	return rx_queue_offload_capa;
+}
+
 int
 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			 uint16_t queue_idx,
@@ -1605,6 +1678,9 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	struct igb_rx_queue *rxq;
 	struct e1000_hw     *hw;
 	unsigned int size;
+	uint64_t offloads;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -1630,6 +1706,7 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 			  RTE_CACHE_LINE_SIZE);
 	if (rxq == NULL)
 		return -ENOMEM;
+	rxq->offloads = offloads;
 	rxq->mb_pool = mp;
 	rxq->nb_rx_desc = nb_desc;
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
@@ -1644,8 +1721,10 @@ eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
 		queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
-				  ETHER_CRC_LEN);
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+		rxq->crc_len = ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 
 	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
@@ -2227,6 +2306,7 @@ igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
 int
 eth_igb_rx_init(struct rte_eth_dev *dev)
 {
+	struct rte_eth_rxmode *rxmode;
 	struct e1000_hw     *hw;
 	struct igb_rx_queue *rxq;
 	uint32_t rctl;
@@ -2247,10 +2327,12 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 	rctl = E1000_READ_REG(hw, E1000_RCTL);
 	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
 
+	rxmode = &dev->data->dev_conf.rxmode;
+
 	/*
	 * Configure support of jumbo frames, if any.
	 */
-	if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
 		rctl |= E1000_RCTL_LPE;
 
 		/*
@@ -2292,9 +2374,10 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		 * Reset crc_len in case it was changed after queue setup by a
 		 * call to configure
 		 */
-		rxq->crc_len =
-			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
-							0 : ETHER_CRC_LEN);
+		if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+			rxq->crc_len = ETHER_CRC_LEN;
+		else
+			rxq->crc_len = 0;
 
 		bus_addr = rxq->rx_ring_phys_addr;
 		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
@@ -2362,7 +2445,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2406,19 +2489,27 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 		rxcsum |= E1000_RXCSUM_PCSD;
 
 	/* Enable both L3/L4 rx checksum offload */
-	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
-		rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+		rxcsum |= E1000_RXCSUM_IPOFL;
 	else
-		rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL |
-				E1000_RXCSUM_CRCOFL);
+		rxcsum &= ~E1000_RXCSUM_IPOFL;
+	if (rxmode->offloads &
+		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+	else
+		rxcsum &= ~E1000_RXCSUM_CRCOFL;
+
 	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
 
 	/* Setup the Receive Control Register. */
-	if (dev->data->dev_conf.rxmode.hw_strip_crc) {
-		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads)) {
+		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
 
-		/* set STRCRC bit in all queues */
+		/* clear STRCRC bit in all queues */
 		if (hw->mac.type == e1000_i350 ||
 		    hw->mac.type == e1000_i210 ||
 		    hw->mac.type == e1000_i211 ||
@@ -2427,14 +2518,14 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
 					E1000_DVMOLR(rxq->reg_idx));
-				dvmolr |= E1000_DVMOLR_STRCRC;
+				dvmolr &= ~E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
 			}
 		}
 	} else {
-		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
 
-		/* clear STRCRC bit in all queues */
+		/* set STRCRC bit in all queues */
 		if (hw->mac.type == e1000_i350 ||
 		    hw->mac.type == e1000_i210 ||
 		    hw->mac.type == e1000_i211 ||
@@ -2443,7 +2534,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
 				rxq = dev->data->rx_queues[i];
 				uint32_t dvmolr = E1000_READ_REG(hw,
 					E1000_DVMOLR(rxq->reg_idx));
-				dvmolr &= ~E1000_DVMOLR_STRCRC;
+				dvmolr |= E1000_DVMOLR_STRCRC;
 				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
 			}
 		}
@@ -2654,7 +2745,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
 		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
 	}
 
-	if (dev->data->dev_conf.rxmode.enable_scatter) {
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
 		if (!dev->data->scattered_rx)
 			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
@@ -2741,6 +2832,7 @@ igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 
 	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
 	qinfo->conf.rx_drop_en = rxq->drop_en;
+	qinfo->conf.offloads = rxq->offloads;
 }
 
 void
@@ -2756,6 +2848,41 @@ igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
 	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
 	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+	qinfo->conf.offloads = txq->offloads;
+}
+
+int
+igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
+		  const struct rte_flow_action_rss *in)
+{
+	if (in->key_len > RTE_DIM(out->key) ||
+	    in->queue_num > RTE_DIM(out->queue))
+		return -EINVAL;
+	out->conf = (struct rte_flow_action_rss){
+		.func = in->func,
+		.level = in->level,
+		.types = in->types,
+		.key_len = in->key_len,
+		.queue_num = in->queue_num,
+		.key = memcpy(out->key, in->key, in->key_len),
+		.queue = memcpy(out->queue, in->queue,
+				sizeof(*in->queue) * in->queue_num),
+	};
+	return 0;
+}
+
+int
+igb_action_rss_same(const struct rte_flow_action_rss *comp,
+		    const struct rte_flow_action_rss *with)
+{
+	return (comp->func == with->func &&
+		comp->level == with->level &&
+		comp->types == with->types &&
+		comp->key_len == with->key_len &&
+		comp->queue_num == with->queue_num &&
+		!memcmp(comp->key, with->key, with->key_len) &&
+		!memcmp(comp->queue, with->queue,
+			sizeof(*with->queue) * with->queue_num));
 }
 
 int
@@ -2764,7 +2891,12 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 {
 	uint32_t shift;
 	uint16_t i, j;
-	struct rte_eth_rss_conf rss_conf = conf->rss_conf;
+	struct rte_eth_rss_conf rss_conf = {
+		.rss_key = conf->conf.key_len ?
+			(void *)(uintptr_t)conf->conf.key : NULL,
+		.rss_key_len = conf->conf.key_len,
+		.rss_hf = conf->conf.types,
+	};
 	struct e1000_filter_info *filter_info =
 		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2772,8 +2904,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
 	if (!add) {
-		if (memcmp(conf, &filter_info->rss_info,
-			sizeof(struct igb_rte_flow_rss_conf)) == 0) {
+		if (igb_action_rss_same(&filter_info->rss_info.conf,
+					&conf->conf)) {
 			igb_rss_disable(dev);
 			memset(&filter_info->rss_info, 0,
 				sizeof(struct igb_rte_flow_rss_conf));
@@ -2782,7 +2914,7 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	if (filter_info->rss_info.num)
+	if (filter_info->rss_info.conf.queue_num)
 		return -EINVAL;
 
 	/* Fill in redirection table. */
@@ -2794,9 +2926,9 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 		} reta;
 		uint8_t q_idx;
 
-		q_idx = conf->queue[j];
-		if (j == conf->num)
+		if (j == conf->conf.queue_num)
 			j = 0;
+		q_idx = conf->conf.queue[j];
 		reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
 		if ((i & 3) == 3)
 			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
@@ -2813,8 +2945,8 @@ igb_config_rss_filter(struct rte_eth_dev *dev,
 		rss_conf.rss_key = rss_intel_key; /* Default hash key */
 	igb_hw_rss_hash_set(hw, &rss_conf);
 
-	rte_memcpy(&filter_info->rss_info,
-		conf, sizeof(struct igb_rte_flow_rss_conf));
+	if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))
+		return -EINVAL;
 
 	return 0;
 }
diff --git a/drivers/net/e1000/meson.build b/drivers/net/e1000/meson.build
index 3a1bf5af..cf456995 100644
--- a/drivers/net/e1000/meson.build
+++ b/drivers/net/e1000/meson.build
@@ -5,6 +5,7 @@ subdir('base')
 objs = [base_objs]
 
 sources = files(
+	'e1000_logs.c',
	'em_ethdev.c',
	'em_rxtx.c',
	'igb_ethdev.c',
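The switch from rte_memcpy() of the whole igb_rte_flow_rss_conf to igb_rss_conf_init()/igb_action_rss_same() matters because rte_flow_action_rss only borrows the application's key and queue arrays through pointers; a shallow copy would keep dangling references. The generic C sketch below illustrates that deep-copy idiom under stated assumptions — the struct and function names here are illustrative, not DPDK API.

#include <stdint.h>
#include <string.h>

struct rss_copy {
	uint8_t key[40];		/* driver-owned key storage */
	uint16_t queue[8];		/* driver-owned queue storage */
	const uint8_t *key_ref;		/* stands in for conf.key */
	const uint16_t *queue_ref;	/* stands in for conf.queue */
};

static int
rss_copy_init(struct rss_copy *out, const uint8_t *key, size_t key_len,
	      const uint16_t *queue, size_t queue_num)
{
	if (key_len > sizeof(out->key) ||
	    queue_num > sizeof(out->queue) / sizeof(out->queue[0]))
		return -1;
	/* Clone the borrowed arrays and point the stored action at the
	 * clone; after this the caller may free its own arrays safely. */
	out->key_ref = memcpy(out->key, key, key_len);
	out->queue_ref = memcpy(out->queue, queue,
				queue_num * sizeof(*queue));
	return 0;
}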