Diffstat (limited to 'drivers/net/i40e')
 drivers/net/i40e/base/i40e_dcb.c        |  2
 drivers/net/i40e/i40e_ethdev.c          | 47
 drivers/net/i40e/i40e_ethdev_vf.c       |  7
 drivers/net/i40e/i40e_pf.c              | 29
 drivers/net/i40e/i40e_rxtx.c            | 29
 drivers/net/i40e/i40e_rxtx.h            |  2
 drivers/net/i40e/i40e_rxtx_vec_common.h |  4
 drivers/net/i40e/i40e_rxtx_vec_neon.c   |  2
 drivers/net/i40e/i40e_rxtx_vec_sse.c    | 16
 9 files changed, 115 insertions(+), 23 deletions(-)
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 26c344fd..9b5405db 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -396,6 +396,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
dcbcfg->numapps = length / sizeof(*app);
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
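
Note: the added clamp guards the fixed-size application table in the DCBX config. numapps is computed from a TLV length supplied by the link peer, so an oversized CEE APP TLV would otherwise drive the parsing loop past the end of the array. A minimal standalone sketch of the pattern (array size and entry type are placeholders, not the driver's real layout):

    #include <stdint.h>

    #define MAX_APPS 32    /* stands in for I40E_DCBX_MAX_APPS */

    struct app_entry { uint8_t prio; uint8_t sel; uint16_t proto; };

    /* Copy at most MAX_APPS entries, whatever the peer-supplied length claims. */
    static unsigned int
    parse_apps(struct app_entry *dst, const struct app_entry *src,
               unsigned int length)
    {
        unsigned int n = length / sizeof(*src);

        if (n > MAX_APPS)
            n = MAX_APPS;    /* clamp before looping, as the fix does */
        for (unsigned int i = 0; i < n; i++)
            dst[i] = src[i];
        return n;
    }
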
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bf7e5a05..4e4cd16a 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -460,6 +460,7 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
@@ -2069,6 +2070,8 @@ out:
if (link.link_status == old.link_status)
return -1;
+ i40e_notify_all_vfs_link_status(dev);
+
return 0;
}
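
Note: this hunk ties VF notification to the actual status transition. i40e_dev_link_update() bails out early when the status is unchanged, so hanging the call here (instead of in the admin-queue handler; see the matching removal in i40e_dev_handle_aq_msg() below) means transitions detected by polling reach the VFs too. The resulting sequence at the end of the function, as a condensed sketch:

    /* sketch: tail of i40e_dev_link_update() after this patch */
    if (link.link_status == old.link_status)
        return -1;    /* no transition, no notification */

    i40e_notify_all_vfs_link_status(dev);    /* every transition reaches VFs */
    return 0;    /* caller may then raise RTE_ETH_EVENT_INTR_LSC */
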
@@ -4105,6 +4108,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
hw->aq.asq_last_status);
goto fail;
}
+ veb->enabled_tc = I40E_DEFAULT_TCMAP;
/* get statistics index */
ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
@@ -5504,11 +5508,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
break;
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
- if (!ret) {
- i40e_notify_all_vfs_link_status(dev);
+ if (!ret)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
- }
break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
@@ -7051,7 +7053,44 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
pctype = i40e_flowtype_to_pctype(i);
reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
+ reg);
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+ reg);
+ }
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
}
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
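
Note: on X722 the classifier splits the four basic flow types into extra packet classifier types (unicast/multicast UDP, TCP SYN-no-ACK), so the symmetric-hash enable bit has to be replicated to every sibling PCTYPE or hashing would stay asymmetric for those packets. The ladder above could equally be written table-driven; a hypothetical sketch against the driver's internal headers (the alias table and helper are illustrative, not part of the patch):

    /* hypothetical alias table; PCTYPE values come from the i40e base headers */
    static const struct {
        enum i40e_filter_pctype base;
        enum i40e_filter_pctype alias[2];
        unsigned int nb_alias;
    } x722_hsym_alias[] = {
        { I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
          { I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP,
            I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP }, 2 },
        { I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
          { I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK }, 1 },
        { I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
          { I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP,
            I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP }, 2 },
        { I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
          { I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK }, 1 },
    };

    static void
    x722_write_hsym(struct i40e_hw *hw, enum i40e_filter_pctype pctype,
                    uint32_t reg)
    {
        unsigned int i, j;

        i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
        for (i = 0; i < RTE_DIM(x722_hsym_alias); i++) {
            if (x722_hsym_alias[i].base != pctype)
                continue;
            for (j = 0; j < x722_hsym_alias[i].nb_alias; j++)
                i40e_write_rx_ctl(hw,
                    I40E_GLQF_HSYM(x722_hsym_alias[i].alias[j]), reg);
        }
    }
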
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 640d316a..efd4fac1 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1997,6 +1997,10 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return;
+ }
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
@@ -2142,6 +2146,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
new_link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
new_link.link_speed = ETH_SPEED_NUM_40G;
break;
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 97b8eccc..b36d9019 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -907,8 +907,33 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ switch (dev->data->dev_link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ default:
+ event.event_data.link_event.link_speed =
+ I40E_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
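
Note: the switch replaces a bogus cast. rte_ethdev link speeds are plain Mb/s values (ETH_SPEED_NUM_10G is 10000) while enum i40e_aq_link_speed is a bitmap of I40E_LINK_SPEED_* flags, so casting one to the other produced nonsense for every speed; the VF-side hunk above adds the inverse decode for 25G. Factored into a helper, the mapping would look like this (a sketch; the patch inlines it):

    /* sketch: rte_eth Mb/s value -> AQ link-speed bitmap */
    static enum i40e_aq_link_speed
    eth_speed_to_aq_speed(uint32_t link_speed)
    {
        switch (link_speed) {
        case ETH_SPEED_NUM_100M: return I40E_LINK_SPEED_100MB;
        case ETH_SPEED_NUM_1G:   return I40E_LINK_SPEED_1GB;
        case ETH_SPEED_NUM_10G:  return I40E_LINK_SPEED_10GB;
        case ETH_SPEED_NUM_20G:  return I40E_LINK_SPEED_20GB;
        case ETH_SPEED_NUM_25G:  return I40E_LINK_SPEED_25GB;
        case ETH_SPEED_NUM_40G:  return I40E_LINK_SPEED_40GB;
        default:                 return I40E_LINK_SPEED_UNKNOWN;
        }
    }
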
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1b25b2f2..602e40c9 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -598,6 +598,7 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
@@ -615,9 +616,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (i40e_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u",
- rxq->port_id, rxq->queue_id);
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -679,6 +681,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union i40e_rx_desc rxd;
struct i40e_rx_entry *sw_ring;
struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
uint16_t nb_rx;
@@ -707,10 +710,13 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
- rxd = *rxdp;
+ }
+ rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
@@ -802,6 +808,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
@@ -818,8 +825,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
+ }
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
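
Note: all three scalar Rx paths used to only log mbuf-allocation failures; they now account them in dev->data->rx_mbuf_alloc_failed, which rte_eth_stats_get() surfaces as rx_nombuf. The bulk path charges rx_free_thresh per failure because i40e_rx_alloc_bufs() attempts to refill that many descriptors in one call. How an application would watch the counter (a sketch):

    #include <rte_ethdev.h>

    /* sketch: report accumulated Rx mbuf allocation failures for a port */
    static uint64_t
    rx_nombuf_count(uint8_t port_id)
    {
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
            return 0;
        return stats.rx_nombuf;    /* fed by rx_mbuf_alloc_failed */
    }
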
@@ -1717,11 +1728,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring =
@@ -2129,11 +2136,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index ecdb13cb..635ed48b 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -113,11 +113,11 @@ struct i40e_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc */
struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< number of staged packets ready */
uint16_t rx_next_avail; /**< index of next staged packets */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
- struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
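
Note: fake_mbuf leaves the RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC guard because the i40e_rxtx.c hunks above now size the software ring and plant the tail sentinel unconditionally: the vector Rx routines also read up to RTE_PMD_I40E_RX_MAX_BURST entries past nb_rx_desc and expect those slots to point at a harmless dummy mbuf rather than NULL. The sentinel pattern in isolation (a standalone sketch, not driver code):

    #include <stddef.h>

    #define RING_SIZE 8
    #define PAD       4    /* stands in for RTE_PMD_I40E_RX_MAX_BURST */

    struct mbuf { int payload; };
    struct sw_entry { struct mbuf *mbuf; };

    static struct mbuf fake_mbuf;    /* dummy sentinel */
    static struct sw_entry sw_ring[RING_SIZE + PAD];

    /* Fixed-stride readers may run past the last real slot; they must never
     * dereference NULL, so every pad entry points at the sentinel. */
    static void
    reset_ring(void)
    {
        for (size_t i = 0; i < PAD; i++)
            sw_ring[RING_SIZE + i].mbuf = &fake_mbuf;
    }
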
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 990520f3..23b4e352 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -243,6 +243,10 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (rxmode->header_split == 1)
return -1;
+ /* no QinQ support */
+ if (rxmode->hw_vlan_extend == 1)
+ return -1;
+
return 0;
#else
RTE_SET_USED(dev);
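
Note: the new hw_vlan_extend test parallels the header-split check above it. The vector Rx routines do not handle double VLAN (QinQ) tags, so when extended VLAN is enabled this condition check fails and the driver falls back to the scalar receive functions instead of silently mishandling the outer tag.
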
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 011c54e0..d235daa7 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -205,7 +205,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
for (i = 0; i < 4; i++) {
tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
ptype = vgetq_lane_u8(tmp, 8);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+ rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
}
}
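
Note: a one-character fix with outsized effect. Indexing rx_pkts[0] meant the loop wrote the first mbuf's packet_type four times and left packets 1-3 of each NEON batch with stale types, breaking packet-type classification for three out of every four received packets.
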
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index b95cc8e1..9644dd61 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -320,20 +320,26 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_I40E_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -342,8 +348,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
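
Note: the new guards follow from register geometry. An XMM register holds 16 bytes, i.e. two 8-byte mbuf pointers on x86_64 but four 4-byte pointers on 32-bit x86, where the first load/store pair already moves all four pointers the loop owns and the mbp2 pair would copy entries beyond them. The arithmetic in isolation (illustrative, not driver code):

    #include <emmintrin.h>    /* SSE2: __m128i */
    #include <stdio.h>

    int main(void)
    {
        /* mbuf pointers carried per 16-byte XMM load */
        size_t per_reg = sizeof(__m128i) / sizeof(void *);

        /* 2 on 64-bit, so a 4-descriptor loop needs two loads; 4 on 32-bit,
         * so one load suffices, hence the RTE_ARCH_X86_64 guards above */
        printf("%zu mbuf pointers per XMM register\n", per_reg);
        return 0;
    }
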