author     Christian Ehrhardt <christian.ehrhardt@canonical.com>    2016-12-05 11:42:44 +0100
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>    2016-12-05 11:46:26 +0100
commit     32e04ea00cd159613e04acef75e52bfca6eeff2f (patch)
tree       f19e4885612e596bb8c8c3c5914157ae5417e180 /drivers/net/i40e
parent     6cfa4f771efe39dbc944e799cbe465134c8931fa (diff)
Imported Upstream version 16.07.2
Change-Id: I76bc313e0942233ce259612069ded302dd6c87bb
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/i40e')
-rw-r--r--  drivers/net/i40e/base/i40e_common.c      2
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c         241
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h           4
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c       81
-rw-r--r--  drivers/net/i40e/i40e_pf.c              33
-rw-r--r--  drivers/net/i40e/i40e_pf.h               3
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c            15
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec.c        20
8 files changed, 198 insertions, 201 deletions
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 98ed4b68..4407f2d3 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -773,7 +773,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
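The hunk above corrects ptype 90 (non-tunneled IPv6/UDP) so its decoded entry reports the payload at layer 4 rather than layer 3. A minimal sketch, not part of the upstream patch, of how a caller could check the decoded entry; it assumes the base code's decode_rx_desc_ptype() helper and the I40E_RX_PTYPE_* constants keep their usual definitions:

static inline int
i40e_example_ptype90_is_udp_pay4(void)
{
	/* i40e_ptype_lookup[] is indexed directly by the 8-bit hardware ptype */
	struct i40e_rx_ptype_decoded d = decode_rx_desc_ptype(90);

	return d.known &&
	       d.inner_prot == I40E_RX_PTYPE_INNER_PROT_UDP &&
	       d.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4;
}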
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index d0aeb703..13068cc4 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -108,7 +108,6 @@
I40E_PFINT_ICR0_ENA_GRST_MASK | \
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK | \
I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK | \
- I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK | \
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK | \
I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK | \
I40E_PFINT_ICR0_ENA_VFLR_MASK | \
@@ -202,7 +201,7 @@
/* Source MAC address */
#define I40E_REG_INSET_L2_SMAC 0x1C00000000000000ULL
/* Outer (S-Tag) VLAN tag in the outer L2 header */
-#define I40E_REG_INSET_L2_OUTER_VLAN 0x0200000000000000ULL
+#define I40E_REG_INSET_L2_OUTER_VLAN 0x0000000004000000ULL
/* Inner (C-Tag) or single VLAN tag in the outer L2 header */
#define I40E_REG_INSET_L2_INNER_VLAN 0x0080000000000000ULL
/* Single VLAN tag in the inner L2 header */
@@ -211,6 +210,14 @@
#define I40E_REG_INSET_L3_SRC_IP4 0x0001800000000000ULL
/* Destination IPv4 address */
#define I40E_REG_INSET_L3_DST_IP4 0x0000001800000000ULL
+/* Source IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_SRC_IP4 0x0006000000000000ULL
+/* Destination IPv4 address for X722 */
+#define I40E_X722_REG_INSET_L3_DST_IP4 0x0000060000000000ULL
+/* IPv4 Protocol for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_PROTO 0x0010000000000000ULL
+/* IPv4 Time to Live for X722 */
+#define I40E_X722_REG_INSET_L3_IP4_TTL 0x0010000000000000ULL
/* IPv4 Type of Service (TOS) */
#define I40E_REG_INSET_L3_IP4_TOS 0x0040000000000000ULL
/* IPv4 Protocol */
@@ -724,10 +731,6 @@ static struct rte_driver rte_i40e_driver = {
PMD_REGISTER_DRIVER(rte_i40e_driver, i40e);
DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map);
-/*
- * Initialize registers for flexible payload, which should be set by NVM.
- * This should be removed from code once it is fixed in NVM.
- */
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
#endif
@@ -735,8 +738,12 @@ DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map);
#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
#endif
-static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
+static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
+ /*
+ * Initialize registers for flexible payload, which should be set by NVM.
+ * This should be removed from code once it is fixed in NVM.
+ */
I40E_WRITE_REG(hw, I40E_GLQF_ORT(18), 0x00000030);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(19), 0x00000030);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(26), 0x0000002B);
@@ -747,10 +754,12 @@ static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
I40E_WRITE_REG(hw, I40E_GLQF_ORT(20), 0x00000031);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(23), 0x00000031);
I40E_WRITE_REG(hw, I40E_GLQF_ORT(63), 0x0000002D);
-
- /* GLQF_PIT Registers */
I40E_WRITE_REG(hw, I40E_GLQF_PIT(16), 0x00007480);
I40E_WRITE_REG(hw, I40E_GLQF_PIT(17), 0x00007440);
+
+ /* Initialize registers for parsing packet type of QinQ */
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420);
}
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
@@ -932,6 +941,9 @@ config_floating_veb(struct rte_eth_dev *dev)
}
}
+#define I40E_L2_TAGS_S_TAG_SHIFT 1
+#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
+
static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
@@ -1002,11 +1014,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
}
/*
- * To work around the NVM issue,initialize registers
- * for flexible payload by software.
- * It should be removed once issues are fixed in NVM.
+ * To work around the NVM issue, initialize registers
+ * for flexible payload and packet type of QinQ by
+ * software. It should be removed once issues are fixed
+ * in NVM.
*/
- i40e_flex_payload_reg_init(hw);
+ i40e_GLQF_reg_init(hw);
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
@@ -1120,6 +1133,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Disable double vlan by default */
i40e_vsi_config_double_vlan(vsi, FALSE);
+ /* Disable S-TAG identification when floating_veb is disabled */
+ if (!pf->floating_veb) {
+ ret = I40E_READ_REG(hw, I40E_PRT_L2TAGSEN);
+ if (ret & I40E_L2_TAGS_S_TAG_MASK) {
+ ret &= ~I40E_L2_TAGS_S_TAG_MASK;
+ I40E_WRITE_REG(hw, I40E_PRT_L2TAGSEN, ret);
+ }
+ }
+
if (!vsi->max_macaddrs)
len = ETHER_ADDR_LEN;
else
@@ -1214,11 +1236,6 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
dev->rx_pkt_burst = NULL;
dev->tx_pkt_burst = NULL;
- /* Disable LLDP */
- ret = i40e_aq_stop_lldp(hw, true, NULL);
- if (ret != I40E_SUCCESS) /* Its failure can be ignored */
- PMD_INIT_LOG(INFO, "Failed to stop lldp");
-
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -1768,6 +1785,16 @@ i40e_dev_start(struct rte_eth_dev *dev)
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
" no intr multiplex\n");
+ } else if (dev->data->dev_conf.intr_conf.lsc != 0) {
+ ret = i40e_aq_set_phy_int_mask(hw,
+ ~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MODULE_QUAL_FAIL |
+ I40E_AQ_EVENT_MEDIA_NA), NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(WARNING, "Fail to set phy mask");
+
+ /* Call get_link_info aq commond to enable LSE */
+ i40e_dev_link_update(dev, 0);
}
/* enable uio intr after callback register */
@@ -1984,6 +2011,7 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link, old;
int status;
unsigned rep_cnt = MAX_REPEAT_TIME;
+ bool enable_lse = dev->data->dev_conf.intr_conf.lsc ? true : false;
memset(&link, 0, sizeof(link));
memset(&old, 0, sizeof(old));
@@ -1992,7 +2020,8 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
do {
/* Get link status information from hardware */
- status = i40e_aq_get_link_info(hw, false, &link_status, NULL);
+ status = i40e_aq_get_link_info(hw, enable_lse,
+ &link_status, NULL);
if (status != I40E_SUCCESS) {
link.link_speed = ETH_SPEED_NUM_100M;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -4097,11 +4126,13 @@ i40e_vsi_release(struct i40e_vsi *vsi)
void *temp;
int ret;
struct i40e_mac_filter *f;
- uint16_t user_param = vsi->user_param;
+ uint16_t user_param;
if (!vsi)
return I40E_SUCCESS;
+ user_param = vsi->user_param;
+
pf = I40E_VSI_TO_PF(vsi);
hw = I40E_VSI_TO_HW(vsi);
@@ -5407,6 +5438,24 @@ i40e_dev_handle_vfr_event(struct rte_eth_dev *dev)
}
static void
+i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_virtchnl_pf_event event;
+ int i;
+
+ event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event_data.link_event.link_status =
+ dev->data->dev_link.link_status;
+ event.event_data.link_event.link_speed =
+ (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ for (i = 0; i < pf->vf_num; i++)
+ i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
+static void
i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5442,6 +5491,14 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
info.msg_buf,
info.msg_len);
break;
+ case i40e_aqc_opc_get_link_status:
+ ret = i40e_dev_link_update(dev, 0);
+ if (!ret) {
+ i40e_notify_all_vfs_link_status(dev);
+ _rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC);
+ }
+ break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
opcode);
@@ -5451,57 +5508,6 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
rte_free(info.msg_buf);
}
-/*
- * Interrupt handler is registered as the alarm callback for handling LSC
- * interrupt in a definite of time, in order to wait the NIC into a stable
- * state. Currently it waits 1 sec in i40e for the link up interrupt, and
- * no need for link down interrupt.
- */
-static void
-i40e_dev_interrupt_delayed_handler(void *param)
-{
- struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t icr0;
-
- /* read interrupt causes again */
- icr0 = I40E_READ_REG(hw, I40E_PFINT_ICR0);
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
- if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error\n");
- if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
- PMD_DRV_LOG(ERR, "ICR0: malicious programming detected\n");
- if (icr0 & I40E_PFINT_ICR0_GRST_MASK)
- PMD_DRV_LOG(INFO, "ICR0: global reset requested\n");
- if (icr0 & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
- PMD_DRV_LOG(INFO, "ICR0: PCI exception\n activated\n");
- if (icr0 & I40E_PFINT_ICR0_STORM_DETECT_MASK)
- PMD_DRV_LOG(INFO, "ICR0: a change in the storm control "
- "state\n");
- if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: HMC error\n");
- if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
- PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error\n");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
-
- if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
- PMD_DRV_LOG(INFO, "INT:VF reset detected\n");
- i40e_dev_handle_vfr_event(dev);
- }
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- PMD_DRV_LOG(INFO, "INT:ADMINQ event\n");
- i40e_dev_handle_aq_msg(dev);
- }
-
- /* handle the link up interrupt in an alarm callback */
- i40e_dev_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
-
- i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
-}
-
/**
* Interrupt handler triggered by NIC for handling
* specific interrupt.
@@ -5558,31 +5564,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(INFO, "ICR0: adminq event");
i40e_dev_handle_aq_msg(dev);
}
-
- /* Link Status Change interrupt */
- if (icr0 & I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK) {
-#define I40E_US_PER_SECOND 1000000
- struct rte_eth_link link;
-
- PMD_DRV_LOG(INFO, "ICR0: link status changed\n");
- memset(&link, 0, sizeof(link));
- rte_i40e_dev_atomic_read_link_status(dev, &link);
- i40e_dev_link_update(dev, 0);
-
- /*
- * For link up interrupt, it needs to wait 1 second to let the
- * hardware be a stable state. Otherwise several consecutive
- * interrupts can be observed.
- * For link down interrupt, no need to wait.
- */
- if (!link.link_status && rte_eal_alarm_set(I40E_US_PER_SECOND,
- i40e_dev_interrupt_delayed_handler, (void *)dev) >= 0)
- return;
- else
- _rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC);
- }
-
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
@@ -7372,25 +7353,23 @@ i40e_parse_input_set(uint64_t *inset,
* and vice versa
*/
static uint64_t
-i40e_translate_input_set_reg(uint64_t input)
+i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input)
{
uint64_t val = 0;
uint16_t i;
- static const struct {
+ struct inset_map {
uint64_t inset;
uint64_t inset_reg;
- } inset_map[] = {
+ };
+
+ static const struct inset_map inset_map_common[] = {
{I40E_INSET_DMAC, I40E_REG_INSET_L2_DMAC},
{I40E_INSET_SMAC, I40E_REG_INSET_L2_SMAC},
{I40E_INSET_VLAN_OUTER, I40E_REG_INSET_L2_OUTER_VLAN},
{I40E_INSET_VLAN_INNER, I40E_REG_INSET_L2_INNER_VLAN},
{I40E_INSET_LAST_ETHER_TYPE, I40E_REG_INSET_LAST_ETHER_TYPE},
- {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
- {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
{I40E_INSET_IPV4_TOS, I40E_REG_INSET_L3_IP4_TOS},
- {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
- {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
{I40E_INSET_IPV6_SRC, I40E_REG_INSET_L3_SRC_IP6},
{I40E_INSET_IPV6_DST, I40E_REG_INSET_L3_DST_IP6},
{I40E_INSET_IPV6_TC, I40E_REG_INSET_L3_IP6_TC},
@@ -7419,13 +7398,40 @@ i40e_translate_input_set_reg(uint64_t input)
{I40E_INSET_FLEX_PAYLOAD_W8, I40E_REG_INSET_FLEX_PAYLOAD_WORD8},
};
+ /* some different registers map in x722*/
+ static const struct inset_map inset_map_diff_x722[] = {
+ {I40E_INSET_IPV4_SRC, I40E_X722_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_X722_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_PROTO, I40E_X722_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_X722_REG_INSET_L3_IP4_TTL},
+ };
+
+ static const struct inset_map inset_map_diff_not_x722[] = {
+ {I40E_INSET_IPV4_SRC, I40E_REG_INSET_L3_SRC_IP4},
+ {I40E_INSET_IPV4_DST, I40E_REG_INSET_L3_DST_IP4},
+ {I40E_INSET_IPV4_PROTO, I40E_REG_INSET_L3_IP4_PROTO},
+ {I40E_INSET_IPV4_TTL, I40E_REG_INSET_L3_IP4_TTL},
+ };
+
if (input == 0)
return val;
/* Translate input set to register aware inset */
- for (i = 0; i < RTE_DIM(inset_map); i++) {
- if (input & inset_map[i].inset)
- val |= inset_map[i].inset_reg;
+ if (type == I40E_MAC_X722) {
+ for (i = 0; i < RTE_DIM(inset_map_diff_x722); i++) {
+ if (input & inset_map_diff_x722[i].inset)
+ val |= inset_map_diff_x722[i].inset_reg;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(inset_map_diff_not_x722); i++) {
+ if (input & inset_map_diff_not_x722[i].inset)
+ val |= inset_map_diff_not_x722[i].inset_reg;
+ }
+ }
+
+ for (i = 0; i < RTE_DIM(inset_map_common); i++) {
+ if (input & inset_map_common[i].inset)
+ val |= inset_map_common[i].inset_reg;
}
return val;
@@ -7510,7 +7516,8 @@ i40e_filter_input_set_init(struct i40e_pf *pf)
I40E_INSET_MASK_NUM_REG);
if (num < 0)
return;
- inset_reg = i40e_translate_input_set_reg(input_set);
+ inset_reg = i40e_translate_input_set_reg(hw->mac.type,
+ input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
(uint32_t)(inset_reg & UINT32_MAX));
@@ -7592,7 +7599,7 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw,
if (num < 0)
return -EINVAL;
- inset_reg |= i40e_translate_input_set_reg(input_set);
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype),
(uint32_t)(inset_reg & UINT32_MAX));
@@ -7668,7 +7675,7 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf,
if (num < 0)
return -EINVAL;
- inset_reg |= i40e_translate_input_set_reg(input_set);
+ inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
(uint32_t)(inset_reg & UINT32_MAX));
@@ -9148,17 +9155,13 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
* LLDP MIB change event.
*/
if (sw_dcb == TRUE) {
- ret = i40e_aq_stop_lldp(hw, TRUE, NULL);
- if (ret != I40E_SUCCESS)
- PMD_INIT_LOG(DEBUG, "Failed to stop lldp");
-
ret = i40e_init_dcb(hw);
- /* if sw_dcb, lldp agent is stopped, the return from
+ /* If lldp agent is stopped, the return value from
* i40e_init_dcb we expect is failure with I40E_AQ_RC_EPERM
- * adminq status.
+ * adminq status. Otherwise, it should return success.
*/
- if (ret != I40E_SUCCESS &&
- hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ if ((ret == I40E_SUCCESS) || (ret != I40E_SUCCESS &&
+ hw->aq.asq_last_status == I40E_AQ_RC_EPERM)) {
memset(&hw->local_dcbx_config, 0,
sizeof(struct i40e_dcbx_config));
/* set dcb default configuration */
@@ -9187,8 +9190,8 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
return -ENOSYS;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
+ PMD_INIT_LOG(ERR, "DCB initialization in FW fails,"
+ " err = %d, aq_err = %d.", ret,
hw->aq.asq_last_status);
return -ENOTSUP;
}
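With the ICR0 link-status branch and the delayed alarm handler removed above, link-state changes now reach applications through the i40e_aqc_opc_get_link_status adminq event, which refreshes the link and calls _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC). A rough application-side sketch, assuming the DPDK 16.07 callback prototype (not part of the patch), of the handler that path ends up invoking when dev_conf.intr_conf.lsc is set:

#include <stdio.h>
#include <rte_ethdev.h>

static void
example_lsc_cb(uint8_t port_id, enum rte_eth_event_type type, void *arg)
{
	struct rte_eth_link link;

	(void)arg;
	if (type != RTE_ETH_EVENT_INTR_LSC)
		return;
	/* the PMD already refreshed the link via i40e_dev_link_update() */
	rte_eth_link_get_nowait(port_id, &link);
	printf("port %u: link %s, %u Mbps\n", (unsigned)port_id,
	       link.link_status ? "up" : "down", (unsigned)link.link_speed);
}

/* registered once per port, e.g.:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *                               example_lsc_cb, NULL);
 */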
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 92c8fad0..61dfa932 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -599,7 +599,9 @@ int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
struct rte_eth_input_set_conf *conf);
-
+int i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, uint32_t opcode,
+ uint32_t retval, uint8_t *msg,
+ uint16_t msglen);
void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index a616ae0b..ba63a7f1 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -126,8 +126,6 @@ static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static int i40evf_get_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link);
static int i40evf_init_vlan(struct rte_eth_dev *dev);
static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
@@ -1084,31 +1082,6 @@ i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
return err;
}
-static int
-i40evf_get_link_status(struct rte_eth_dev *dev, struct rte_eth_link *link)
-{
- struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- int err;
- struct vf_cmd_info args;
- struct rte_eth_link *new_link;
-
- args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_GET_LINK_STAT;
- args.in_args = NULL;
- args.in_args_size = 0;
- args.out_buffer = vf->aq_resp;
- args.out_size = I40E_AQ_BUF_SZ;
- err = i40evf_execute_vf_cmd(dev, &args);
- if (err) {
- PMD_DRV_LOG(ERR, "fail to execute command OP_GET_LINK_STAT");
- return err;
- }
-
- new_link = (struct rte_eth_link *)args.out_buffer;
- (void)rte_memcpy(link, new_link, sizeof(*link));
-
- return 0;
-}
-
static const struct rte_pci_id pci_id_i40evf_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
@@ -2166,35 +2139,33 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
* DPDK pf host provide interfacet to acquire link status
* while Linux driver does not
*/
- if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
- i40evf_get_link_status(dev, &new_link);
- else {
- /* Linux driver PF host */
- switch (vf->link_speed) {
- case I40E_LINK_SPEED_100MB:
- new_link.link_speed = ETH_SPEED_NUM_100M;
- break;
- case I40E_LINK_SPEED_1GB:
- new_link.link_speed = ETH_SPEED_NUM_1G;
- break;
- case I40E_LINK_SPEED_10GB:
- new_link.link_speed = ETH_SPEED_NUM_10G;
- break;
- case I40E_LINK_SPEED_20GB:
- new_link.link_speed = ETH_SPEED_NUM_20G;
- break;
- case I40E_LINK_SPEED_40GB:
- new_link.link_speed = ETH_SPEED_NUM_40G;
- break;
- default:
- new_link.link_speed = ETH_SPEED_NUM_100M;
- break;
- }
- /* full duplex only */
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_status = vf->link_up ? ETH_LINK_UP :
- ETH_LINK_DOWN;
+
+ /* Linux driver PF host */
+ switch (vf->link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ new_link.link_speed = ETH_SPEED_NUM_1G;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ new_link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ new_link.link_speed = ETH_SPEED_NUM_20G;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ new_link.link_speed = ETH_SPEED_NUM_40G;
+ break;
+ default:
+ new_link.link_speed = ETH_SPEED_NUM_100M;
+ break;
}
+ /* full duplex only */
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_status = vf->link_up ? ETH_LINK_UP :
+ ETH_LINK_DOWN;
+
i40evf_dev_atomic_write_link_status(dev, &new_link);
return 0;
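Because the VF no longer queries the PF with OP_GET_LINK_STAT, the link-update path above depends on vf->link_up and vf->link_speed staying current. A hedged sketch of the update the VF side performs when the PF pushes the I40E_VIRTCHNL_EVENT_LINK_CHANGE event built by i40e_notify_vf_link_status(); the handler name here is illustrative, not the driver's:

static void
example_vf_handle_pf_event(struct i40e_vf *vf,
			   struct i40e_virtchnl_pf_event *pf_msg)
{
	if (pf_msg->event == I40E_VIRTCHNL_EVENT_LINK_CHANGE) {
		/* mirror the fields the PF filled in before sending OP_EVENT */
		vf->link_up = pf_msg->event_data.link_event.link_status;
		vf->link_speed = pf_msg->event_data.link_event.link_speed;
	}
}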
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index d5b2d450..4e2f6b63 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -250,7 +250,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
return ret;
}
-static int
+int
i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
uint32_t opcode,
uint32_t retval,
@@ -847,18 +847,6 @@ i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
return I40E_SUCCESS;
}
-static void
-i40e_pf_host_process_cmd_get_link_status(struct i40e_pf_vf *vf)
-{
- struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(vf->pf->main_vsi);
-
- /* Update link status first to acquire latest link change */
- i40e_dev_link_update(dev, 1);
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_LINK_STAT,
- I40E_SUCCESS, (uint8_t *)&dev->data->dev_link,
- sizeof(struct rte_eth_link));
-}
-
static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
struct i40e_pf_vf *vf,
@@ -909,6 +897,20 @@ send_msg:
return ret;
}
+static void
+i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
+{
+ struct i40e_virtchnl_pf_event event;
+
+ event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
+ event.event_data.link_event.link_status =
+ dev->data->dev_link.link_status;
+ event.event_data.link_event.link_speed =
+ (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
+ I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+}
+
void
i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint16_t abs_vf_id, uint32_t opcode,
@@ -964,6 +966,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ i40e_notify_vf_link_status(dev, vf);
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
@@ -993,10 +996,6 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
i40e_pf_host_process_cmd_get_stats(vf);
break;
- case I40E_VIRTCHNL_OP_GET_LINK_STAT:
- PMD_DRV_LOG(INFO, "OP_GET_LINK_STAT received");
- i40e_pf_host_process_cmd_get_link_status(vf);
- break;
case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index 9c01829a..cddc45cf 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -59,9 +59,8 @@ enum i40e_virtchnl_ops_dpdk {
* Keep some gap between Linux PF commands and
* DPDK PF extended commands.
*/
- I40E_VIRTCHNL_OP_GET_LINK_STAT = I40E_VIRTCHNL_OP_VERSION +
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD = I40E_VIRTCHNL_OP_VERSION +
I40E_DPDK_OFFSET,
- I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
};
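Removing I40E_VIRTCHNL_OP_GET_LINK_STAT renumbers the remaining DPDK-private opcodes, so a DPDK PF host and a DPDK VF must be upgraded together. A worked example of the values, assuming I40E_VIRTCHNL_OP_VERSION is 1 and I40E_DPDK_OFFSET is 0x100 as defined earlier in this header:

/* Assuming I40E_VIRTCHNL_OP_VERSION == 1 and I40E_DPDK_OFFSET == 0x100:
 *
 *   opcode                                   before    after
 *   I40E_VIRTCHNL_OP_GET_LINK_STAT           0x101     (removed)
 *   I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD        0x102     0x101
 *   I40E_VIRTCHNL_OP_CFG_VLAN_PVID           0x103     0x102
 *   I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT   0x104     0x103
 */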
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 554d1679..0556a4d4 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -2948,11 +2948,15 @@ i40e_dev_clear_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_tx_queue_release_mbufs(dev->data->tx_queues[i]);
i40e_reset_tx_queue(dev->data->tx_queues[i]);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_rx_queue_release_mbufs(dev->data->rx_queues[i]);
i40e_reset_rx_queue(dev->data->rx_queues[i]);
}
@@ -2966,12 +2970,16 @@ i40e_dev_free_queues(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ if (!dev->data->rx_queues[i])
+ continue;
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ if (!dev->data->tx_queues[i])
+ continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
@@ -3154,7 +3162,7 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
struct i40e_rx_queue *rxq =
dev->data->rx_queues[i];
- if (i40e_rxq_vec_setup(rxq)) {
+ if (rxq && i40e_rxq_vec_setup(rxq)) {
ad->rx_vec_allowed = false;
break;
}
@@ -3216,7 +3224,8 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
- rxq->rx_using_sse = rx_using_sse;
+ if (rxq)
+ rxq->rx_using_sse = rx_using_sse;
}
}
}
@@ -3255,7 +3264,7 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
struct i40e_tx_queue *txq =
dev->data->tx_queues[i];
- if (i40e_txq_vec_setup(txq)) {
+ if (txq && i40e_txq_vec_setup(txq)) {
ad->tx_vec_allowed = false;
break;
}
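The NULL checks added in the hunks above cover queue slots that were never set up or were already released: rte_eth_dev_configure() sizes the rx_queues[]/tx_queues[] arrays, but a slot only becomes non-NULL once the per-queue setup call succeeds. A minimal sketch of that situation (not part of the patch), assuming a valid port_id and an already-created mbuf mempool:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mempool.h>

static void
example_partial_queue_setup(uint8_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf;

	memset(&port_conf, 0, sizeof(port_conf));
	/* sizes dev->data->rx_queues[]/tx_queues[] to 4 slots each */
	rte_eth_dev_configure(port_id, 4, 4, &port_conf);

	/* only slot 0 is set up; slots 1..3 stay NULL */
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mb_pool);
	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);

	/* teardown walks all 4 slots, hence the NULL guards in the PMD */
	rte_eth_dev_stop(port_id);
	rte_eth_dev_close(port_id);
}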
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index 51fb282a..a9649d35 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -282,7 +282,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
-
+ rte_compiler_barrier();
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -290,8 +290,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+ rte_compiler_barrier();
/* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+ rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* B.2 copy 2 mbuf point into rx_pkts */
@@ -692,8 +694,20 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
return;
/* free all mbufs that are valid in the ring */
- for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
- rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ if (rxq->rxrearm_nb == 0) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ } else {
+ for (i = rxq->rx_tail;
+ i != rxq->rxrearm_start;
+ i = (i + 1) & mask) {
+ if (rxq->sw_ring[i].mbuf != NULL)
+ rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+ }
+ }
+
rxq->rxrearm_nb = rxq->nb_rx_desc;
/* set all entries to NULL */
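For context on the barriers added to _recv_raw_pkts_vec() above: rte_compiler_barrier() is a compile-time fence only, and placing one between the descriptor loads keeps the compiler from coalescing or reordering the four _mm_loadu_si128() calls, which would defeat the "read desc statuses backwards" ordering the loop depends on. A hedged sketch of the primitive as rte_atomic.h defines it, to my understanding:

/* compile-time fence only -- no CPU barrier instruction is emitted */
#define example_compiler_barrier() do {		\
	asm volatile ("" : : : "memory");	\
} while (0)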