path: root/drivers/net
author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-03-02 16:15:51 +0100
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-03-03 14:41:36 +0100
commit     ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (patch)
tree       3a9e9f8f6a62c7146fb391eae34481b2af4f7ff2 /drivers/net
parent     6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9 (diff)
Imported Upstream version 16.11.1
Change-Id: I1e965265578efaaf08e5628607f53d2386d2df9f
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/af_packet/rte_eth_af_packet.c  13
-rw-r--r--  drivers/net/bnx2x/bnx2x.c  1
-rw-r--r--  drivers/net/bnx2x/bnx2x.h  11
-rw-r--r--  drivers/net/bnx2x/bnx2x_ethdev.c  9
-rw-r--r--  drivers/net/bnx2x/bnx2x_vfpf.c  1
-rw-r--r--  drivers/net/cxgbe/base/t4_hw.c  2
-rw-r--r--  drivers/net/cxgbe/cxgbe_main.c  2
-rw-r--r--  drivers/net/ena/base/ena_com.c  16
-rw-r--r--  drivers/net/ena/ena_ethdev.c  21
-rw-r--r--  drivers/net/enic/enic.h  6
-rw-r--r--  drivers/net/enic/enic_clsf.c  14
-rw-r--r--  drivers/net/enic/enic_main.c  5
-rw-r--r--  drivers/net/enic/enic_res.c  5
-rw-r--r--  drivers/net/enic/enic_res.h  5
-rw-r--r--  drivers/net/enic/enic_rxtx.c  13
-rw-r--r--  drivers/net/fm10k/fm10k_ethdev.c  3
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c  62
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h  4
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c  44
-rw-r--r--  drivers/net/i40e/i40e_pf.c  6
-rw-r--r--  drivers/net/i40e/i40e_pf.h  5
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c  8
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h  1
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c  37
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.c  23
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethdev.h  2
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx.c  16
-rw-r--r--  drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c  29
-rw-r--r--  drivers/net/mlx4/mlx4.c  18
-rw-r--r--  drivers/net/mlx5/mlx5.c  4
-rw-r--r--  drivers/net/mlx5/mlx5_ethdev.c  110
-rw-r--r--  drivers/net/mlx5/mlx5_prm.h  47
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.c  126
-rw-r--r--  drivers/net/mlx5/mlx5_rxtx.h  2
-rw-r--r--  drivers/net/mlx5/mlx5_txq.c  2
-rw-r--r--  drivers/net/nfp/nfp_net.c  10
-rw-r--r--  drivers/net/pcap/rte_eth_pcap.c  2
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c  2
-rw-r--r--  drivers/net/qede/base/reg_addr.h  6
-rw-r--r--  drivers/net/qede/qede_eth_if.c  101
-rw-r--r--  drivers/net/qede/qede_eth_if.h  38
-rw-r--r--  drivers/net/qede/qede_ethdev.c  341
-rw-r--r--  drivers/net/qede/qede_ethdev.h  15
-rw-r--r--  drivers/net/qede/qede_main.c  3
-rw-r--r--  drivers/net/qede/qede_rxtx.c  14
-rw-r--r--  drivers/net/vhost/rte_eth_vhost.c  85
-rw-r--r--  drivers/net/virtio/virtio_ethdev.c  75
-rw-r--r--  drivers/net/virtio/virtio_ethdev.h  5
-rw-r--r--  drivers/net/virtio/virtio_pci.c  80
-rw-r--r--  drivers/net/virtio/virtio_pci.h  24
-rw-r--r--  drivers/net/virtio/virtio_rxtx.c  28
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.c  60
-rw-r--r--  drivers/net/virtio/virtio_user/virtio_user_dev.h  5
-rw-r--r--  drivers/net/virtio/virtio_user_ethdev.c  25
-rw-r--r--  drivers/net/virtio/virtqueue.h  2
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_rxtx.c  39
56 files changed, 992 insertions, 641 deletions
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index ff450685..45c6519f 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -261,9 +261,16 @@ eth_dev_stop(struct rte_eth_dev *dev)
sockfd = internals->rx_queue[i].sockfd;
if (sockfd != -1)
close(sockfd);
- sockfd = internals->tx_queue[i].sockfd;
- if (sockfd != -1)
- close(sockfd);
+
+ /* Prevent use after free in case tx fd == rx fd */
+ if (sockfd != internals->tx_queue[i].sockfd) {
+ sockfd = internals->tx_queue[i].sockfd;
+ if (sockfd != -1)
+ close(sockfd);
+ }
+
+ internals->rx_queue[i].sockfd = -1;
+ internals->tx_queue[i].sockfd = -1;
}
dev->data->dev_link.link_status = ETH_LINK_DOWN;
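The hunk above avoids closing the same descriptor twice when a queue's rx and tx sides share one socket. A rough standalone sketch of the same guard, using hypothetical rx_fd/tx_fd pointers rather than the driver's own queue structures:

	#include <unistd.h>

	/* Close an rx/tx descriptor pair that may alias the same socket. */
	static void close_queue_pair(int *rx_fd, int *tx_fd)
	{
		if (*rx_fd != -1)
			close(*rx_fd);
		/* Skip the tx descriptor when it is the same socket as rx. */
		if (*tx_fd != -1 && *tx_fd != *rx_fd)
			close(*tx_fd);
		/* Mark both slots closed so a later stop cannot close again. */
		*rx_fd = -1;
		*tx_fd = -1;
	}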
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 28566302..0d16a737 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -1438,6 +1438,7 @@ bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
break;
+ case BNX2X_RX_MODE_ALLMULTI_PROMISC:
case BNX2X_RX_MODE_PROMISC:
/*
* According to deffinition of SI mode, iface in promisc mode
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 5cefea43..57093054 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1146,11 +1146,12 @@ struct bnx2x_softc {
#define BNX2X_RECOVERY_NIC_LOADING 5
uint32_t rx_mode;
-#define BNX2X_RX_MODE_NONE 0
-#define BNX2X_RX_MODE_NORMAL 1
-#define BNX2X_RX_MODE_ALLMULTI 2
-#define BNX2X_RX_MODE_PROMISC 3
-#define BNX2X_MAX_MULTICAST 64
+#define BNX2X_RX_MODE_NONE 0
+#define BNX2X_RX_MODE_NORMAL 1
+#define BNX2X_RX_MODE_ALLMULTI 2
+#define BNX2X_RX_MODE_ALLMULTI_PROMISC 3
+#define BNX2X_RX_MODE_PROMISC 4
+#define BNX2X_MAX_MULTICAST 64
struct bnx2x_port port;
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index 0eae433f..a8aebbe3 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -256,6 +256,8 @@ bnx2x_promisc_enable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_PROMISC;
+ if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
bnx2x_set_rx_mode(sc);
}
@@ -266,6 +268,8 @@ bnx2x_promisc_disable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+ if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
bnx2x_set_rx_mode(sc);
}
@@ -276,6 +280,8 @@ bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
bnx2x_set_rx_mode(sc);
}
@@ -286,6 +292,8 @@ bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
sc->rx_mode = BNX2X_RX_MODE_NORMAL;
+ if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
+ sc->rx_mode = BNX2X_RX_MODE_PROMISC;
bnx2x_set_rx_mode(sc);
}
@@ -422,6 +430,7 @@ bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
xstats[num].value =
*(uint64_t *)((char *)&sc->eth_stats +
bnx2x_xstats_strings[num].offset_lo);
+ xstats[num].id = num;
}
return num;
diff --git a/drivers/net/bnx2x/bnx2x_vfpf.c b/drivers/net/bnx2x/bnx2x_vfpf.c
index c47beb0e..0ca0df87 100644
--- a/drivers/net/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/bnx2x/bnx2x_vfpf.c
@@ -648,6 +648,7 @@ bnx2x_vf_set_rx_mode(struct bnx2x_softc *sc)
query->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
query->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
break;
+ case BNX2X_RX_MODE_ALLMULTI_PROMISC:
case BNX2X_RX_MODE_PROMISC:
query->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
query->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index 7e79adf6..c089b068 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -1532,7 +1532,7 @@ int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
unsigned int base = adapter->params.pci.vpd_cap_addr;
int ret;
- u32 stats_reg;
+ u32 stats_reg = 0;
int max_poll;
/* VPD Accesses must alway be 4-byte aligned!
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 922155b4..345f9b03 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -959,7 +959,7 @@ int setup_rss(struct port_info *pi)
dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
__func__, pi->rss_size, pi->n_rx_qsets);
- if (!pi->flags & PORT_RSS_DONE) {
+ if (!(pi->flags & PORT_RSS_DONE)) {
if (adapter->flags & FULL_INIT_DONE) {
/* Fill default values with equal distribution */
for (j = 0; j < pi->rss_size; j++)
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index 88053e33..bd6f3c6b 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -2590,19 +2590,11 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
struct ena_com_admin_queue *admin_queue;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
+ int ret;
- int ret = 0;
-
- if (unlikely(!ena_dev)) {
- ena_trc_err("%s : ena_dev is NULL\n", __func__);
- return ENA_COM_NO_DEVICE;
- }
-
- if (!ena_com_check_supported_feature_id(ena_dev,
- ENA_ADMIN_HOST_ATTR_CONFIG)) {
- ena_trc_warn("Set host attribute isn't supported\n");
- return ENA_COM_PERMISSION;
- }
+ /* Host attribute config is called before ena_com_get_dev_attr_feat
+ * so ena_com can't check if the feature is supported.
+ */
memset(&cmd, 0x0, sizeof(cmd));
admin_queue = &ena_dev->admin_queue;
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index ab9a178f..c1fd7bb3 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -357,12 +357,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
- if (rc == -EPERM)
- RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
- else
- RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-
- goto err;
+ RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+ if (rc != -EPERM)
+ goto err;
}
return;
@@ -413,11 +410,9 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(&adapter->ena_dev);
if (rc) {
- if (rc == -EPERM)
- RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
- else
- RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
- goto err;
+ RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+ if (rc != -EPERM)
+ goto err;
}
return;
@@ -1228,14 +1223,14 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
goto err_mmio_read_less;
}
- ena_config_host_info(ena_dev);
-
/* To enable the msix interrupts the driver needs to know the number
* of queues. So the driver uses polling mode to retrieve this
* information.
*/
ena_com_set_admin_polling_mode(ena_dev, true);
+ ena_config_host_info(ena_dev);
+
/* Get Device Attributes and features */
rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
if (rc) {
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 865cd76e..a3d2a0fb 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -102,6 +102,7 @@ struct enic_fdir {
struct enic_soft_stats {
rte_atomic64_t rx_nombuf;
rte_atomic64_t rx_packet_errors;
+ rte_atomic64_t tx_oversized;
};
struct enic_memzone_entry {
@@ -301,8 +302,7 @@ int enic_link_update(struct enic *enic);
void enic_fdir_info(struct enic *enic);
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks);
-void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
- __rte_unused struct rte_eth_fdir_input *input,
__rte_unused struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index bcf479ac..487f8044 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -211,15 +211,15 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
if (input->flow.ip4_flow.tos) {
- ip4_mask.type_of_service = 0xff;
+ ip4_mask.type_of_service = masks->ipv4_mask.tos;
ip4_val.type_of_service = input->flow.ip4_flow.tos;
}
if (input->flow.ip4_flow.ttl) {
- ip4_mask.time_to_live = 0xff;
+ ip4_mask.time_to_live = masks->ipv4_mask.ttl;
ip4_val.time_to_live = input->flow.ip4_flow.ttl;
}
if (input->flow.ip4_flow.proto) {
- ip4_mask.next_proto_id = 0xff;
+ ip4_mask.next_proto_id = masks->ipv4_mask.proto;
ip4_val.next_proto_id = input->flow.ip4_flow.proto;
}
if (input->flow.ip4_flow.src_ip) {
@@ -299,7 +299,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
if (input->flow.ipv6_flow.proto) {
- ipv6_mask.proto = 0xff;
+ ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
}
for (i = 0; i < 4; i++) {
@@ -315,11 +315,11 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
input->flow.ipv6_flow.dst_ip[i];
}
if (input->flow.ipv6_flow.tc) {
- ipv6_mask.vtc_flow = 0x00ff0000;
- ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
+ ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
}
if (input->flow.ipv6_flow.hop_limits) {
- ipv6_mask.hop_limits = 0xff;
+ ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
}
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f0b15ac1..1861a32c 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -137,6 +137,7 @@ static void enic_clear_soft_stats(struct enic *enic)
struct enic_soft_stats *soft_stats = &enic->soft_stats;
rte_atomic64_clear(&soft_stats->rx_nombuf);
rte_atomic64_clear(&soft_stats->rx_packet_errors);
+ rte_atomic64_clear(&soft_stats->tx_oversized);
}
static void enic_init_soft_stats(struct enic *enic)
@@ -144,6 +145,7 @@ static void enic_init_soft_stats(struct enic *enic)
struct enic_soft_stats *soft_stats = &enic->soft_stats;
rte_atomic64_init(&soft_stats->rx_nombuf);
rte_atomic64_init(&soft_stats->rx_packet_errors);
+ rte_atomic64_init(&soft_stats->tx_oversized);
enic_clear_soft_stats(enic);
}
@@ -183,7 +185,8 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
r_stats->obytes = stats->tx.tx_bytes_ok;
r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop;
- r_stats->oerrors = stats->tx.tx_errors;
+ r_stats->oerrors = stats->tx.tx_errors
+ + rte_atomic64_read(&soft_stats->tx_oversized);
r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 8a230a16..867bd25c 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -89,10 +89,11 @@ int enic_get_vnic_config(struct enic *enic)
/* max packet size is only defined in newer VIC firmware
* and will be 0 for legacy firmware and VICs
*/
- if (c->max_pkt_size > ENIC_DEFAULT_MAX_PKT_SIZE)
+ if (c->max_pkt_size > ENIC_DEFAULT_RX_MAX_PKT_SIZE)
enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4);
else
- enic->max_mtu = ENIC_DEFAULT_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4);
+ enic->max_mtu = ENIC_DEFAULT_RX_MAX_PKT_SIZE
+ - (ETHER_HDR_LEN + 4);
if (c->mtu == 0)
c->mtu = 1500;
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 303530ef..1135d2e1 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -48,7 +48,10 @@
#define ENIC_MIN_MTU 68
/* Does not include (possible) inserted VLAN tag and FCS */
-#define ENIC_DEFAULT_MAX_PKT_SIZE 9022
+#define ENIC_DEFAULT_RX_MAX_PKT_SIZE 9022
+
+/* Does not include (possible) inserted VLAN tag and FCS */
+#define ENIC_TX_MAX_PKT_SIZE 9208
#define ENIC_MULTICAST_PERFECT_FILTERS 32
#define ENIC_UNICAST_PERFECT_FILTERS 32
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index f762a26c..912ea157 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -477,16 +477,23 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
for (index = 0; index < nb_pkts; index++) {
tx_pkt = *tx_pkts++;
+ pkt_len = tx_pkt->pkt_len;
+ data_len = tx_pkt->data_len;
+ ol_flags = tx_pkt->ol_flags;
nb_segs = tx_pkt->nb_segs;
+
+ if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
+ rte_pktmbuf_free(tx_pkt);
+ rte_atomic64_inc(&enic->soft_stats.tx_oversized);
+ continue;
+ }
+
if (nb_segs > wq_desc_avail) {
if (index > 0)
goto post;
goto done;
}
- pkt_len = tx_pkt->pkt_len;
- data_len = tx_pkt->data_len;
- ol_flags = tx_pkt->ol_flags;
mss = 0;
vlan_id = 0;
vlan_tag_insert = 0;
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 923690c0..7c51d3b5 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -1315,6 +1315,7 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
fm10k_hw_stats_strings[count].offset);
+ xstats[count].id = count;
count++;
}
@@ -1324,12 +1325,14 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
xstats[count].value =
*(uint64_t *)(((char *)&hw_stats->q[q]) +
fm10k_hw_stats_rx_q_strings[i].offset);
+ xstats[count].id = count;
count++;
}
for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
xstats[count].value =
*(uint64_t *)(((char *)&hw_stats->q[q]) +
fm10k_hw_stats_tx_q_strings[i].offset);
+ xstats[count].id = count;
count++;
}
}
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 67778baf..bf7e5a05 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1628,6 +1628,8 @@ i40e_phy_conf_link(struct i40e_hw *hw,
/* use get_phy_abilities_resp value for the rest */
phy_conf.phy_type = phy_ab.phy_type;
+ phy_conf.phy_type_ext = phy_ab.phy_type_ext;
+ phy_conf.fec_config = phy_ab.mod_type_ext;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
phy_conf.low_power_ctrl = phy_ab.d3_lpan;
@@ -1653,8 +1655,7 @@ i40e_apply_link_speed(struct rte_eth_dev *dev)
struct rte_eth_conf *conf = &dev->data->dev_conf;
speed = i40e_parse_link_speeds(conf->link_speeds);
- if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
- abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED))
abilities |= I40E_AQ_PHY_AN_ENABLED;
abilities |= I40E_AQ_PHY_LINK_ENABLED;
@@ -1875,18 +1876,17 @@ i40e_dev_close(struct rte_eth_dev *dev)
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
- /* release all the existing VSIs and VEBs */
- i40e_fdir_teardown(pf);
- i40e_vsi_release(pf->main_vsi);
-
for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
i40e_vsi_release(pf->vmdq[i].vsi);
pf->vmdq[i].vsi = NULL;
}
-
rte_free(pf->vmdq);
pf->vmdq = NULL;
+ /* release all the existing VSIs and VEBs */
+ i40e_fdir_teardown(pf);
+ i40e_vsi_release(pf->main_vsi);
+
/* shutdown the adminq */
i40e_aq_queue_shutdown(hw, true);
i40e_shutdown_adminq(hw);
@@ -1990,8 +1990,7 @@ i40e_dev_set_link_down(struct rte_eth_dev *dev)
uint8_t abilities = 0;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types))
- abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
return i40e_phy_conf_link(hw, abilities, speed);
}
@@ -2025,11 +2024,11 @@ i40e_dev_link_update(struct rte_eth_dev *dev,
}
link.link_status = link_status.link_info & I40E_AQ_LINK_UP;
- if (!wait_to_complete)
+ if (!wait_to_complete || link.link_status)
break;
rte_delay_ms(CHECK_INTERVAL);
- } while (!link.link_status && rep_cnt--);
+ } while (--rep_cnt);
if (!link.link_status)
goto out;
@@ -2532,6 +2531,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < I40E_NB_ETH_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) +
rte_i40e_stats_strings[i].offset);
+ xstats[count].id = count;
count++;
}
@@ -2539,6 +2539,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
+ xstats[count].id = count;
count++;
}
@@ -2548,6 +2549,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_rxq_prio_strings[i].offset +
(sizeof(uint64_t) * prio));
+ xstats[count].id = count;
count++;
}
}
@@ -2558,6 +2560,7 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
*(uint64_t *)(((char *)hw_stats) +
rte_i40e_txq_prio_strings[i].offset +
(sizeof(uint64_t) * prio));
+ xstats[count].id = count;
count++;
}
}
@@ -4136,6 +4139,9 @@ i40e_vsi_release(struct i40e_vsi *vsi)
if (!vsi)
return I40E_SUCCESS;
+ if (!vsi->adapter)
+ return -EFAULT;
+
user_param = vsi->user_param;
pf = I40E_VSI_TO_PF(vsi);
@@ -5844,7 +5850,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
static int
i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
{
- int i, num;
+ int i, j, num;
struct i40e_mac_filter *f;
struct i40e_macvlan_filter *mv_f;
int ret = I40E_SUCCESS;
@@ -5869,6 +5875,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
TAILQ_FOREACH(f, &vsi->mac_list, next) {
(void)rte_memcpy(&mv_f[i].macaddr,
&f->mac_info.mac_addr, ETH_ADDR_LEN);
+ mv_f[i].filter_type = f->mac_info.filter_type;
mv_f[i].vlan_id = 0;
i++;
}
@@ -5878,6 +5885,8 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi)
vsi->vlan_num, &f->mac_info.mac_addr);
if (ret != I40E_SUCCESS)
goto DONE;
+ for (j = i; j < i + vsi->vlan_num; j++)
+ mv_f[j].filter_type = f->mac_info.filter_type;
i += vsi->vlan_num;
}
}
@@ -8275,6 +8284,10 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
#define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200
#define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08
+/* For X722 */
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200
+#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200
+
/* For X710 */
#define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303
/* For XL710 */
@@ -8297,7 +8310,6 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw)
return 0;
}
-
static void
i40e_configure_registers(struct i40e_hw *hw)
{
@@ -8305,8 +8317,8 @@ i40e_configure_registers(struct i40e_hw *hw)
uint32_t addr;
uint64_t val;
} reg_table[] = {
- {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE},
- {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE},
+ {I40E_GL_SWR_PRI_JOIN_MAP_0, 0},
+ {I40E_GL_SWR_PRI_JOIN_MAP_2, 0},
{I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */
};
uint64_t reg;
@@ -8314,6 +8326,24 @@ i40e_configure_registers(struct i40e_hw *hw)
int ret;
for (i = 0; i < RTE_DIM(reg_table); i++) {
+ if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) {
+ if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+ reg_table[i].val =
+ I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ else /* For X710/XL710/XXV710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE;
+ }
+
+ if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) {
+ if (hw->mac.type == I40E_MAC_X722) /* For X722 */
+ reg_table[i].val =
+ I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+ else /* For X710/XL710/XXV710 */
+ reg_table[i].val =
+ I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE;
+ }
+
if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) {
if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */
I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 298cef48..5f3ecd9a 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -227,7 +227,7 @@ struct i40e_bw_info {
/* Relative credits within same TC with respect to other VSIs or Comps */
uint8_t bw_ets_share_credits[I40E_MAX_TRAFFIC_CLASS];
/* Bandwidth limit per TC */
- uint8_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
+ uint16_t bw_ets_credits[I40E_MAX_TRAFFIC_CLASS];
/* Max bandwidth limit per TC */
uint8_t bw_ets_max[I40E_MAX_TRAFFIC_CLASS];
};
@@ -527,7 +527,7 @@ struct i40e_vf {
enum i40e_aq_link_speed link_speed;
bool vf_reset;
volatile uint32_t pend_cmd; /* pending command not finished yet */
- uint32_t cmd_retval; /* return value of the cmd response from PF */
+ int32_t cmd_retval; /* return value of the cmd response from PF */
u16 pend_msg; /* flags indicates events from pf not handled yet */
uint8_t *aq_resp; /* buffer to store the adminq response from PF */
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index aa306d61..640d316a 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -176,11 +176,11 @@ static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
rx_unknown_protocol)},
{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
- {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_bytes)},
+ {"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
+ {"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
+ {"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
+ {"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
+ {"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
};
#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
@@ -361,6 +361,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
err = -1;
do {
ret = i40evf_read_pfmsg(dev, &info);
+ vf->cmd_retval = info.result;
if (ret == I40EVF_MSG_CMD) {
err = 0;
break;
@@ -965,7 +966,7 @@ i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
pstats->rx_broadcast;
stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
pstats->tx_unicast;
- stats->ierrors = pstats->rx_discards;
+ stats->imissed = pstats->rx_discards;
stats->oerrors = pstats->tx_errors + pstats->tx_discards;
stats->ibytes = pstats->rx_bytes;
stats->obytes = pstats->tx_bytes;
@@ -1336,8 +1337,9 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_arq_event_info info;
- struct i40e_virtchnl_msg *v_msg;
- uint16_t pending, opcode;
+ uint16_t pending, aq_opc;
+ enum i40e_virtchnl_ops msg_opc;
+ enum i40e_status_code msg_ret;
int ret;
info.buf_len = I40E_AQ_BUF_SZ;
@@ -1346,7 +1348,6 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
return;
}
info.msg_buf = vf->aq_resp;
- v_msg = (struct i40e_virtchnl_msg *)&info.desc;
pending = 1;
while (pending) {
@@ -1357,32 +1358,39 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
"ret: %d", ret);
break;
}
- opcode = rte_le_to_cpu_16(info.desc.opcode);
-
- switch (opcode) {
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+ /* For the message sent from pf to vf, opcode is stored in
+ * cookie_high of struct i40e_aq_desc, while return error code
+ * are stored in cookie_low, Which is done by
+ * i40e_aq_send_msg_to_vf in PF driver.*/
+ msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
case i40e_aqc_opc_send_msg_to_vf:
- if (v_msg->v_opcode == I40E_VIRTCHNL_OP_EVENT)
+ if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
/* process event*/
i40evf_handle_pf_event(dev, info.msg_buf,
info.msg_len);
else {
/* read message and it's expected one */
- if (v_msg->v_opcode == vf->pend_cmd) {
- vf->cmd_retval = v_msg->v_retval;
+ if (msg_opc == vf->pend_cmd) {
+ vf->cmd_retval = msg_ret;
/* prevent compiler reordering */
rte_compiler_barrier();
_clear_cmd(vf);
} else
PMD_DRV_LOG(ERR, "command mismatch,"
"expect %u, get %u",
- vf->pend_cmd, v_msg->v_opcode);
+ vf->pend_cmd, msg_opc);
PMD_DRV_LOG(DEBUG, "adminq response is received,"
- " opcode = %d\n", v_msg->v_opcode);
+ " opcode = %d\n", msg_opc);
}
break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet",
- opcode);
+ aq_opc);
break;
}
}
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index ddfc140d..97b8eccc 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -138,7 +138,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
abs_vf_id = vf_id + hw->func_caps.vf_base_id;
/* Notify VF that we are in VFR progress */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_INPROGRESS);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_INPROGRESS);
/*
* If require a SW VF reset, a VFLR interrupt will be generated,
@@ -219,7 +219,7 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
}
/* Reset done, Set COMPLETE flag and clear reset bit */
- I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_PF_VFR_COMPLETED);
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_COMPLETED);
val = I40E_READ_REG(hw, I40E_VPGEN_VFRTRIG(vf_id));
val &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
I40E_WRITE_REG(hw, I40E_VPGEN_VFRTRIG(vf_id), val);
@@ -247,6 +247,8 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset)
return -EFAULT;
}
+ I40E_WRITE_REG(hw, I40E_VFGEN_RSTAT1(vf_id), I40E_VFR_VFACTIVE);
+
return ret;
}
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index cddc45cf..244bac37 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -48,11 +48,6 @@
#define I40E_DPDK_OFFSET 0x100
-enum i40e_pf_vfr_state {
- I40E_PF_VFR_INPROGRESS = 0,
- I40E_PF_VFR_COMPLETED = 1,
-};
-
/* DPDK pf driver specific command to VF */
enum i40e_virtchnl_ops_dpdk {
/*
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 7ae7d9fb..1b25b2f2 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -138,8 +138,11 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword)
uint64_t error_bits = (qword >> I40E_RXD_QW1_ERROR_SHIFT);
#define I40E_RX_ERR_BITS 0x3f
- if (likely((error_bits & I40E_RX_ERR_BITS) == 0))
+ if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) {
+ flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
return flags;
+ }
+
if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT)))
flags |= PKT_RX_IP_CKSUM_BAD;
else
@@ -1916,8 +1919,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
return I40E_ERR_PARAM;
}
if (tx_free_thresh >= (nb_desc - 3)) {
- PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
- "tx_free_thresh must be less than the "
+ PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX descriptors minus 3. "
"(tx_free_thresh=%u port=%d queue=%d)",
(unsigned int)tx_free_thresh,
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 6cb5dce9..990520f3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -71,6 +71,7 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
+ start->nb_segs--;
while (secondlast->next != end)
secondlast = secondlast->next;
secondlast->data_len -= (rxq->crc_len -
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index 7c84a41a..b95cc8e1 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -148,6 +148,20 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
const __m128i rss_vlan_msk = _mm_set_epi32(
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
+ const __m128i cksum_mask = _mm_set_epi32(
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD);
+
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
@@ -160,14 +174,17 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
0, 0, PKT_RX_FDIR, 0);
const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD,
- 0);
+ /* shift right 1 bit to make sure it not exceed 255 */
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
@@ -181,6 +198,10 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
l3_l4e = _mm_srli_epi32(vlan1, 22);
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+ /* then we shift left 1 bit */
+ l3_l4e = _mm_slli_epi32(l3_l4e, 1);
+ /* we need to mask out the reduntant bits */
+ l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
vlan0 = _mm_or_si128(vlan0, rss);
vlan0 = _mm_or_si128(vlan0, l3_l4e);
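The reworked l3_l4e_flags table above depends on _mm_shuffle_epi8() returning byte-wide entries: combinations that include the *_CKSUM_GOOD bits would not fit in 8 bits, so each entry is stored shifted right by one and the lookup result is shifted back and masked, as the added comments note. A scalar sketch of the same trick, with made-up flag values standing in for the real PKT_RX_* constants:

	#include <stdint.h>

	#define FLAG_IP_GOOD   (1u << 7)
	#define FLAG_L4_GOOD   (1u << 8)   /* would overflow a byte-wide table entry */
	#define FLAG_ALL_MASK  (FLAG_IP_GOOD | FLAG_L4_GOOD)

	/* Each entry is pre-shifted right by one bit so it fits in 8 bits. */
	static const uint8_t flag_table[2] = {
		(FLAG_IP_GOOD | FLAG_L4_GOOD) >> 1,
		FLAG_IP_GOOD >> 1,
	};

	static uint32_t lookup_flags(unsigned int idx)
	{
		/* Undo the pre-shift and drop any bits outside the valid set. */
		return ((uint32_t)flag_table[idx] << 1) & FLAG_ALL_MASK;
	}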
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index edc9b22c..bac36e0d 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2902,6 +2902,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_stats_strings[i].offset);
+ xstats[count].id = count;
count++;
}
@@ -2911,6 +2912,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_rxq_strings[stat].offset +
(sizeof(uint64_t) * i));
+ xstats[count].id = count;
count++;
}
}
@@ -2921,6 +2923,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_txq_strings[stat].offset +
(sizeof(uint64_t) * i));
+ xstats[count].id = count;
count++;
}
}
@@ -3168,7 +3171,7 @@ ixgbevf_dev_info_get(struct rte_eth_dev *dev,
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
- dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+ dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = dev->pci_dev->max_vfs;
@@ -3471,7 +3474,6 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
struct rte_eth_link link;
- int intr_enable_delay = false;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -3504,20 +3506,19 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
ixgbe_dev_link_status_print(dev);
-
- intr_enable_delay = true;
- }
-
- if (intr_enable_delay) {
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
if (rte_eal_alarm_set(timeout * 1000,
ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
- } else {
- PMD_DRV_LOG(DEBUG, "enable intr immediately");
- ixgbe_enable_intr(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ else
+ intr->mask = intr->mask_original;
}
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 4ff6338e..a4e2996a 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -165,6 +165,8 @@ struct ixgbe_hw_fdir_info {
struct ixgbe_interrupt {
uint32_t flags;
uint32_t mask;
+ /*to save original mask during delayed handler */
+ uint32_t mask_original;
};
struct ixgbe_stat_mapping_registers {
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index b2d9f454..c61ce470 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -1402,17 +1402,19 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST;
i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
/* Read desc statuses backwards to avoid race condition */
- for (j = LOOK_AHEAD-1; j >= 0; --j)
+ for (j = 0; j < LOOK_AHEAD; j++)
s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);
- for (j = LOOK_AHEAD - 1; j >= 0; --j)
- pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
- lo_dword.data);
+ rte_smp_rmb();
/* Compute how many status bits were set */
- nb_dd = 0;
- for (j = 0; j < LOOK_AHEAD; ++j)
- nb_dd += s[j] & IXGBE_RXDADV_STAT_DD;
+ for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
+ (s[nb_dd] & IXGBE_RXDADV_STAT_DD); nb_dd++)
+ ;
+
+ for (j = 0; j < nb_dd; j++)
+ pkt_info[j] = rte_le_to_cpu_32(rxdp[j].wb.lower.
+ lo_dword.data);
nb_rx += nb_dd;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index f96cc85c..e2715cb9 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -196,7 +196,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
struct ixgbe_rx_entry *sw_ring;
uint16_t nb_pkts_recd;
int pos;
- uint64_t var;
uint8x16_t shuf_msk = {
0xFF, 0xFF,
0xFF, 0xFF, /* skip 32 bits pkt_type */
@@ -255,26 +254,24 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint64x2_t mbp1, mbp2;
uint8x16_t staterr;
uint16x8_t tmp;
+ uint32_t var = 0;
uint32_t stat;
/* B.1 load 1 mbuf point */
mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
- /* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
- descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
- rte_rmb();
-
/* B.2 copy 2 mbuf point into rx_pkts */
vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
/* B.1 load 1 mbuf point */
mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
- descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
- /* B.1 load 2 mbuf point */
- descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ /* A. load 4 pkts descs */
descs[0] = vld1q_u64((uint64_t *)(rxdp));
+ descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+ descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+ descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+ rte_smp_rmb();
/* B.2 copy 2 mbuf point into rx_pkts */
vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
@@ -349,11 +346,19 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
+ stat &= IXGBE_VPMD_DESC_DD_MASK;
+
/* C.4 calc avaialbe number of desc */
- var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK);
- nb_pkts_recd += var;
- if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
+ if (likely(stat != IXGBE_VPMD_DESC_DD_MASK)) {
+ while (stat & 0x01) {
+ ++var;
+ stat = stat >> 8;
+ }
+ nb_pkts_recd += var;
break;
+ } else {
+ nb_pkts_recd += RTE_IXGBE_DESCS_PER_LOOP;
+ }
}
/* Update our internal tail pointer */
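The loop introduced above counts only descriptors whose DD status bits are set contiguously from the first position, rather than popcounting every set bit, so a descriptor that completed behind an unfinished one is not counted. A standalone sketch of that counting step, assuming stat packs one status byte per descriptor (lowest byte first) as in the vector code:

	#include <stdint.h>

	/* Count in-order completed descriptors; stop at the first gap. */
	static unsigned int count_contiguous_dd(uint32_t stat)
	{
		unsigned int n = 0;

		while (stat & 0x01) {
			n++;
			stat >>= 8;	/* advance to the next descriptor's byte */
		}
		return n;
	}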
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index da61a856..6d43a977 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -2961,19 +2961,25 @@ rxq_cq_to_pkt_type(uint32_t flags)
if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
pkt_type =
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+ IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) |
+ IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) |
+ IBV_EXP_CQ_RX_IPV4_PACKET,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6);
+ IBV_EXP_CQ_RX_IPV6_PACKET,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
else
pkt_type =
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+ IBV_EXP_CQ_RX_IPV4_PACKET,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN) |
TRANSPOSE(flags,
- IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
+ IBV_EXP_CQ_RX_IPV6_PACKET,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN);
return pkt_type;
}
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 90cc35e4..cb45fd0f 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -330,8 +330,10 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
mlx5_args_check, priv);
- if (ret != 0)
+ if (ret != 0) {
+ rte_kvargs_free(kvlist);
return ret;
+ }
}
}
rte_kvargs_free(kvlist);
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index c0f73e93..06cfd016 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -43,9 +43,11 @@
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
+#include <sys/utsname.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
+#include <linux/version.h>
#include <fcntl.h>
/* DPDK headers don't like -pedantic. */
@@ -67,6 +69,57 @@
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
+/* Add defines in case the running kernel is not the same as user headers. */
+#ifndef ETHTOOL_GLINKSETTINGS
+struct ethtool_link_settings {
+ uint32_t cmd;
+ uint32_t speed;
+ uint8_t duplex;
+ uint8_t port;
+ uint8_t phy_address;
+ uint8_t autoneg;
+ uint8_t mdio_support;
+ uint8_t eth_to_mdix;
+ uint8_t eth_tp_mdix_ctrl;
+ int8_t link_mode_masks_nwords;
+ uint32_t reserved[8];
+ uint32_t link_mode_masks[];
+};
+
+#define ETHTOOL_GLINKSETTINGS 0x0000004c
+#define ETHTOOL_LINK_MODE_1000baseT_Full_BIT 5
+#define ETHTOOL_LINK_MODE_Autoneg_BIT 6
+#define ETHTOOL_LINK_MODE_1000baseKX_Full_BIT 17
+#define ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT 18
+#define ETHTOOL_LINK_MODE_10000baseKR_Full_BIT 19
+#define ETHTOOL_LINK_MODE_10000baseR_FEC_BIT 20
+#define ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT 21
+#define ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT 22
+#define ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT 23
+#define ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT 24
+#define ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT 25
+#define ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT 26
+#define ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT 27
+#define ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT 28
+#define ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT 29
+#define ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT 30
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_25G
+#define ETHTOOL_LINK_MODE_25000baseCR_Full_BIT 31
+#define ETHTOOL_LINK_MODE_25000baseKR_Full_BIT 32
+#define ETHTOOL_LINK_MODE_25000baseSR_Full_BIT 33
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_50G
+#define ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT 34
+#define ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT 35
+#endif
+#ifndef HAVE_ETHTOOL_LINK_MODE_100G
+#define ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT 36
+#define ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT 37
+#define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
+#define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
+#endif
+
/**
* Return private structure associated with an Ethernet device.
*
@@ -690,8 +743,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * Retrieve physical link information (unlocked version using new ioctl from
- * Linux 4.5).
+ * Retrieve physical link information (unlocked version using new ioctl).
*
* @param dev
* Pointer to Ethernet device structure.
@@ -701,7 +753,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
-#ifdef ETHTOOL_GLINKSETTINGS
struct priv *priv = mlx5_get_priv(dev);
struct ethtool_link_settings edata = {
.cmd = ETHTOOL_GLINKSETTINGS,
@@ -728,7 +779,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
sc = edata.link_mode_masks[0] |
((uint64_t)edata.link_mode_masks[1] << 32);
priv->link_speed_capa = 0;
- /* Link speeds available in kernel v4.5. */
if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
if (sc & (ETHTOOL_LINK_MODE_1000baseT_Full_BIT |
@@ -751,25 +801,18 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT |
ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_56G;
- /* Link speeds available in kernel v4.6. */
-#ifdef HAVE_ETHTOOL_LINK_MODE_25G
if (sc & (ETHTOOL_LINK_MODE_25000baseCR_Full_BIT |
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT |
ETHTOOL_LINK_MODE_25000baseSR_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_25G;
-#endif
-#ifdef HAVE_ETHTOOL_LINK_MODE_50G
if (sc & (ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT |
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_50G;
-#endif
-#ifdef HAVE_ETHTOOL_LINK_MODE_100G
if (sc & (ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
priv->link_speed_capa |= ETH_LINK_SPEED_100G;
-#endif
dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
@@ -779,10 +822,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
dev->data->dev_link = dev_link;
return 0;
}
-#else
- (void)dev;
- (void)wait_to_complete;
-#endif
/* Link status is still the same. */
return -1;
}
@@ -798,12 +837,15 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
int
mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
{
- int ret;
-
- ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
- if (ret < 0)
- ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
- return ret;
+ struct utsname utsname;
+ int ver[3];
+
+ if (uname(&utsname) == -1 ||
+ sscanf(utsname.release, "%d.%d.%d",
+ &ver[0], &ver[1], &ver[2]) != 3 ||
+ KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
+ return mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+ return mlx5_link_update_unlocked_gs(dev, wait_to_complete);
}
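The new mlx5_link_update_unlocked() above picks between the two ethtool paths at run time by parsing the running kernel release, rather than deciding at build time from the headers. A minimal standalone sketch of that version probe, using the same uname/sscanf/KERNEL_VERSION pattern and the 4.9 cutoff taken from the hunk:

	#include <stdio.h>
	#include <sys/utsname.h>
	#include <linux/version.h>

	/* Return 1 when the running kernel is at least 4.9, 0 otherwise
	 * (including on any parse failure, to stay on the legacy path). */
	static int kernel_at_least_4_9(void)
	{
		struct utsname name;
		int ver[3];

		if (uname(&name) == -1 ||
		    sscanf(name.release, "%d.%d.%d",
			   &ver[0], &ver[1], &ver[2]) != 3)
			return 0;
		return KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
		       KERNEL_VERSION(4, 9, 0);
	}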
/**
@@ -1141,7 +1183,7 @@ static int
priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
{
struct ibv_async_event event;
- int port_change = 0;
+ struct rte_eth_link *link = &dev->data->dev_link;
int ret = 0;
/* Read all message and acknowledge them. */
@@ -1149,29 +1191,24 @@ priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
if (ibv_get_async_event(priv->ctx, &event))
break;
- if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
- event.event_type == IBV_EVENT_PORT_ERR)
- port_change = 1;
- else
+ if (event.event_type != IBV_EVENT_PORT_ACTIVE &&
+ event.event_type != IBV_EVENT_PORT_ERR)
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
}
-
- if (port_change ^ priv->pending_alarm) {
- struct rte_eth_link *link = &dev->data->dev_link;
-
- priv->pending_alarm = 0;
- mlx5_link_update_unlocked(dev, 0);
- if (((link->link_speed == 0) && link->link_status) ||
- ((link->link_speed != 0) && !link->link_status)) {
+ mlx5_link_update(dev, 0);
+ if (((link->link_speed == 0) && link->link_status) ||
+ ((link->link_speed != 0) && !link->link_status)) {
+ if (!priv->pending_alarm) {
/* Inconsistent status, check again later. */
priv->pending_alarm = 1;
rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
mlx5_dev_link_status_handler,
dev);
- } else
- ret = 1;
+ }
+ } else {
+ ret = 1;
}
return ret;
}
@@ -1191,6 +1228,7 @@ mlx5_dev_link_status_handler(void *arg)
priv_lock(priv);
assert(priv->pending_alarm == 1);
+ priv->pending_alarm = 0;
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
if (ret)
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 7f31a2f7..ed088eea 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -61,9 +61,6 @@
/* Invalidate a CQE. */
#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4)
-/* CQE value to inform that VLAN is stripped. */
-#define MLX5_CQE_VLAN_STRIPPED 0x1
-
/* Maximum number of packets a multi-packet WQE can handle. */
#define MLX5_MPW_DSEG_MAX 5
@@ -84,26 +81,41 @@
#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */
#endif
-/* IPv4 packet. */
-#define MLX5_CQE_RX_IPV4_PACKET (1u << 2)
+/* CQE value to inform that VLAN is stripped. */
+#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
+
+/* IPv4 options. */
+#define MLX5_CQE_RX_IP_EXT_OPTS_PACKET (1u << 1)
/* IPv6 packet. */
-#define MLX5_CQE_RX_IPV6_PACKET (1u << 3)
+#define MLX5_CQE_RX_IPV6_PACKET (1u << 2)
+
+/* IPv4 packet. */
+#define MLX5_CQE_RX_IPV4_PACKET (1u << 3)
-/* Outer IPv4 packet. */
-#define MLX5_CQE_RX_OUTER_IPV4_PACKET (1u << 7)
+/* TCP packet. */
+#define MLX5_CQE_RX_TCP_PACKET (1u << 4)
-/* Outer IPv6 packet. */
-#define MLX5_CQE_RX_OUTER_IPV6_PACKET (1u << 8)
+/* UDP packet. */
+#define MLX5_CQE_RX_UDP_PACKET (1u << 5)
-/* Tunnel packet bit in the CQE. */
-#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 4)
+/* IP is fragmented. */
+#define MLX5_CQE_RX_IP_FRAG_PACKET (1u << 7)
+
+/* L2 header is valid. */
+#define MLX5_CQE_RX_L2_HDR_VALID (1u << 8)
-/* Outer IP checksum OK. */
-#define MLX5_CQE_RX_OUTER_IP_CSUM_OK (1u << 5)
+/* L3 header is valid. */
+#define MLX5_CQE_RX_L3_HDR_VALID (1u << 9)
-/* Outer UDP header and checksum OK. */
-#define MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK (1u << 6)
+/* L4 header is valid. */
+#define MLX5_CQE_RX_L4_HDR_VALID (1u << 10)
+
+/* Outer packet, 0 IPv4, 1 IPv6. */
+#define MLX5_CQE_RX_OUTER_PACKET (1u << 1)
+
+/* Tunnel packet bit in the CQE. */
+#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
/* Subset of struct mlx5_wqe_eth_seg. */
struct mlx5_wqe_eth_seg_small {
@@ -163,8 +175,7 @@ struct mlx5_cqe {
uint32_t rx_hash_res;
uint8_t rx_hash_type;
uint8_t rsvd1[11];
- uint8_t hds_ip_ext;
- uint8_t l4_hdr_type_etc;
+ uint16_t hdr_type_etc;
uint16_t vlan_info;
uint8_t rsvd2[12];
uint32_t byte_cnt;
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 9b598014..3997b27a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -69,6 +69,32 @@
#include "mlx5_defs.h"
#include "mlx5_prm.h"
+static inline int
+check_cqe(volatile struct mlx5_cqe *cqe,
+ unsigned int cqes_n, const uint16_t ci)
+ __attribute__((always_inline));
+
+static inline uint32_t
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
+ __attribute__((always_inline));
+
+static inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
+ __attribute__((always_inline));
+
+static inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
+ __attribute__((always_inline));
+
+static inline int
+mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
+ uint16_t cqe_cnt, uint32_t *rss_hash)
+ __attribute__((always_inline));
+
+static inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
+ __attribute__((always_inline));
+
#ifndef NDEBUG
/**
@@ -98,11 +124,6 @@ check_cqe_seen(volatile struct mlx5_cqe *cqe)
#endif /* NDEBUG */
-static inline int
-check_cqe(volatile struct mlx5_cqe *cqe,
- unsigned int cqes_n, const uint16_t ci)
- __attribute__((always_inline));
-
/**
* Check whether CQE is valid.
*
@@ -199,7 +220,7 @@ txq_complete(struct txq *txq)
} while (1);
if (unlikely(cqe == NULL))
return;
- wqe = &(*txq->wqes)[htons(cqe->wqe_counter) &
+ wqe = &(*txq->wqes)[ntohs(cqe->wqe_counter) &
((1 << txq->wqe_n) - 1)].hdr;
elts_tail = wqe->ctrl[3];
assert(elts_tail < (1 << txq->wqe_n));
@@ -246,10 +267,6 @@ txq_mb2mp(struct rte_mbuf *buf)
return buf->pool;
}
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
- __attribute__((always_inline));
-
/**
* Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[].
* Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full,
@@ -292,23 +309,20 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
*
* @param txq
* Pointer to TX queue structure.
+ * @param wqe
+ * Pointer to the last WQE posted in the NIC.
*/
static inline void
-mlx5_tx_dbrec(struct txq *txq)
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
{
- uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset);
- uint32_t data[4] = {
- htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
- htonl(txq->qp_num_8s),
- 0,
- 0,
- };
+ uint64_t *dst = (uint64_t *)((uintptr_t)txq->bf_reg);
+ volatile uint64_t *src = ((volatile uint64_t *)wqe);
+
rte_wmb();
*txq->qp_db = htonl(txq->wqe_ci);
/* Ensure ordering between DB record and BF copy. */
rte_wmb();
- memcpy(dst, (uint8_t *)data, 16);
- txq->bf_offset ^= (1 << txq->bf_buf_size);
+ *dst = *src;
}
/**
@@ -594,7 +608,7 @@ next_pkt:
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
- mlx5_tx_dbrec(txq);
+ mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
txq->elts_head = elts_head;
return i;
}
@@ -802,7 +816,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Ring QP doorbell. */
if (mpw.state == MLX5_MPW_STATE_OPENED)
mlx5_mpw_close(txq, &mpw);
- mlx5_tx_dbrec(txq);
+ mlx5_tx_dbrec(txq, mpw.wqe);
txq->elts_head = elts_head;
return i;
}
@@ -1028,6 +1042,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
mpw.data.raw =
(volatile void *)&(*txq->wqes)[0];
++mpw.pkts_n;
+ mpw.total_len += length;
++j;
if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) {
mlx5_mpw_inline_close(txq, &mpw);
@@ -1037,7 +1052,6 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
inline_room -= length;
}
}
- mpw.total_len += length;
elts_head = elts_head_next;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
@@ -1071,7 +1085,7 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
mlx5_mpw_inline_close(txq, &mpw);
else if (mpw.state == MLX5_MPW_STATE_OPENED)
mlx5_mpw_close(txq, &mpw);
- mlx5_tx_dbrec(txq);
+ mlx5_tx_dbrec(txq, mpw.wqe);
txq->elts_head = elts_head;
return i;
}
@@ -1091,30 +1105,28 @@ static inline uint32_t
rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
{
uint32_t pkt_type;
- uint8_t flags = cqe->l4_hdr_type_etc;
+ uint16_t flags = ntohs(cqe->hdr_type_etc);
- if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET)
+ if (cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) {
pkt_type =
TRANSPOSE(flags,
- MLX5_CQE_RX_OUTER_IPV4_PACKET,
- RTE_PTYPE_L3_IPV4) |
- TRANSPOSE(flags,
- MLX5_CQE_RX_OUTER_IPV6_PACKET,
- RTE_PTYPE_L3_IPV6) |
- TRANSPOSE(flags,
MLX5_CQE_RX_IPV4_PACKET,
- RTE_PTYPE_INNER_L3_IPV4) |
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN) |
TRANSPOSE(flags,
MLX5_CQE_RX_IPV6_PACKET,
- RTE_PTYPE_INNER_L3_IPV6);
- else
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN);
+ pkt_type |= ((cqe->pkt_info & MLX5_CQE_RX_OUTER_PACKET) ?
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN :
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
+ } else {
pkt_type =
TRANSPOSE(flags,
MLX5_CQE_L3_HDR_TYPE_IPV6,
- RTE_PTYPE_L3_IPV6) |
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN) |
TRANSPOSE(flags,
MLX5_CQE_L3_HDR_TYPE_IPV4,
- RTE_PTYPE_L3_IPV4);
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN);
+ }
return pkt_type;
}
@@ -1232,28 +1244,22 @@ static inline uint32_t
rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
{
uint32_t ol_flags = 0;
- uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK;
- uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK;
-
- if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) ||
- (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6))
- ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
- MLX5_CQE_L3_OK,
- PKT_RX_IP_CKSUM_GOOD);
- if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) ||
- (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) ||
- (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) ||
- (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP))
- ol_flags |= TRANSPOSE(cqe->hds_ip_ext,
- MLX5_CQE_L4_OK,
- PKT_RX_L4_CKSUM_GOOD);
+ uint16_t flags = ntohs(cqe->hdr_type_etc);
+
+ ol_flags =
+ TRANSPOSE(flags,
+ MLX5_CQE_RX_L3_HDR_VALID,
+ PKT_RX_IP_CKSUM_GOOD) |
+ TRANSPOSE(flags,
+ MLX5_CQE_RX_L4_HDR_VALID,
+ PKT_RX_L4_CKSUM_GOOD);
if ((cqe->pkt_info & MLX5_CQE_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
ol_flags |=
- TRANSPOSE(cqe->l4_hdr_type_etc,
- MLX5_CQE_RX_OUTER_IP_CSUM_OK,
+ TRANSPOSE(flags,
+ MLX5_CQE_RX_L3_HDR_VALID,
PKT_RX_IP_CKSUM_GOOD) |
- TRANSPOSE(cqe->l4_hdr_type_etc,
- MLX5_CQE_RX_OUTER_TCP_UDP_CSUM_OK,
+ TRANSPOSE(flags,
+ MLX5_CQE_RX_L4_HDR_VALID,
PKT_RX_L4_CKSUM_GOOD);
return ol_flags;
}
@@ -1310,10 +1316,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
}
while (pkt != seg) {
assert(pkt != (*rxq->elts)[idx]);
- seg = NEXT(pkt);
+ rep = NEXT(pkt);
rte_mbuf_refcnt_set(pkt, 0);
__rte_mbuf_raw_free(pkt);
- pkt = seg;
+ pkt = rep;
}
break;
}
@@ -1338,7 +1344,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Update packet information. */
pkt->packet_type = 0;
pkt->ol_flags = 0;
- if (rxq->rss_hash) {
+ if (rss_hash_res && rxq->rss_hash) {
pkt->hash.rss = rss_hash_res;
pkt->ol_flags = PKT_RX_RSS_HASH;
}
@@ -1350,7 +1356,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt->ol_flags |=
rxq_cq_to_ol_flags(rxq, cqe);
}
- if (cqe->l4_hdr_type_etc &
+ if (cqe->hdr_type_etc &
MLX5_CQE_VLAN_STRIPPED) {
pkt->ol_flags |= PKT_RX_VLAN_PKT |
PKT_RX_VLAN_STRIPPED;
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 5708c2a7..909d80e6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -251,8 +251,6 @@ struct txq {
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
- uint16_t bf_buf_size:4; /* Log2 Blueflame size. */
- uint16_t bf_offset; /* Blueflame offset. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 053665d5..439908fc 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -220,8 +220,6 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
tmpl->txq.bf_reg = qp->gen_data.bf->reg;
- tmpl->txq.bf_offset = qp->gen_data.bf->offset;
- tmpl->txq.bf_buf_size = log2above(qp->gen_data.bf->buf_size);
tmpl->txq.cq_db = cq->dbrec;
tmpl->txq.cqes =
(volatile struct mlx5_cqe (*)[])
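The mlx5 hunks above drop the per-queue BlueFlame offset/size bookkeeping and instead pass the last WQE to mlx5_tx_dbrec(), which rings the doorbell and copies the first 8 bytes of that WQE ("*dst = *src"). The following is a rough, hedged reconstruction of that kind of helper; field names, byte order and barriers are assumptions, not the driver's exact code.

#include <stdint.h>
#include <stdio.h>

struct fake_txq {
	volatile uint32_t *qp_db;  /* doorbell record */
	volatile uint64_t *bf_reg; /* BlueFlame register */
	uint16_t wqe_ci;           /* WQE consumer index */
};

static void tx_dbrec(struct fake_txq *txq, const volatile uint64_t *wqe)
{
	__atomic_thread_fence(__ATOMIC_RELEASE); /* WQE writes visible first */
	*txq->qp_db = __builtin_bswap32(txq->wqe_ci);
	__atomic_thread_fence(__ATOMIC_RELEASE); /* DB record before BF copy */
	*txq->bf_reg = *wqe;                     /* the "*dst = *src" above */
}

int main(void)
{
	uint32_t db = 0;
	uint64_t bf = 0, wqe = 0x1122334455667788ULL; /* stand-in registers */
	struct fake_txq txq = { .qp_db = &db, .bf_reg = &bf, .wqe_ci = 7 };

	tx_dbrec(&txq, &wqe);
	printf("db=0x%x bf=0x%llx\n", db, (unsigned long long)bf);
	return 0;
}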
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index c6b15874..099d82b3 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -1027,8 +1027,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -2043,9 +2043,9 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
new_ctrl = 0;
if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
- (mask & ETH_VLAN_FILTER_OFFLOAD))
- RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or"
- " ETH_VLAN_FILTER_EXTEND");
+ (mask & ETH_VLAN_EXTEND_OFFLOAD))
+ RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+ " ETH_VLAN_EXTEND_OFFLOAD");
/* Enable vlan strip if it is not configured yet */
if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 0162f446..57b0b315 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -247,7 +247,7 @@ calculate_timestamp(struct timeval *ts) {
cycles = rte_get_timer_cycles() - start_cycles;
cur_time.tv_sec = cycles / hz;
- cur_time.tv_usec = (cycles % hz) * 10e6 / hz;
+ cur_time.tv_usec = (cycles % hz) * 1e6 / hz;
timeradd(&start_time, &cur_time, ts);
}
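The pcap fix above is a units error: converting leftover timer cycles to microseconds needs a factor of 1e6, so the old 10e6 overstated tv_usec by 10x. A small sketch with assumed (illustrative) clock numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hz = 2400000000ULL;     /* assumed 2.4 GHz timer frequency */
	uint64_t cycles = 3600000000ULL; /* 1.5 s worth of elapsed cycles */

	uint64_t usec_wrong = (uint64_t)((cycles % hz) * 10e6 / hz); /* 5000000 */
	uint64_t usec_right = (uint64_t)((cycles % hz) * 1e6 / hz);  /* 500000 */

	printf("wrong: %llu us, right: %llu us\n",
	       (unsigned long long)usec_wrong,
	       (unsigned long long)usec_right);
	return 0;
}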
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index e83eeb81..de08650b 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -89,7 +89,7 @@ voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index ab886716..3c369aa5 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -30,7 +30,7 @@
24
#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
- 0xff << 24)
+ 0xffUL << 24) /* @DPDK */
#define XSDM_REG_OPERATION_GEN \
0xf80408UL
@@ -436,11 +436,11 @@
#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
-#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
-#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
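The UL suffixes added above avoid shifting a plain (signed) int constant into the sign bit: 0xff << 24 overflows a 32-bit int and can sign-extend when widened, while 0xffUL << 24 stays well defined. A minimal sketch of the difference on a typical LP64 build:

#include <stdio.h>

int main(void)
{
	/* 0xff << 24 shifts into the int sign bit (undefined behaviour);
	 * in practice it often sign-extends to 0xffffffffff000000 here.
	 */
	unsigned long bad  = 0xff << 24;
	unsigned long good = 0xffUL << 24; /* always 0x00000000ff000000 */

	printf("bad:  0x%lx\ngood: 0x%lx\n", bad, good);
	return 0;
}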
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index 1ae6127b..30fded0f 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -310,86 +310,11 @@ qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
ecore_get_vport_stats(edev, stats);
}
-static int
-qed_configure_filter_ucast(struct ecore_dev *edev,
- struct qed_filter_ucast_params *params)
-{
- struct ecore_filter_ucast ucast;
-
- if (!params->vlan_valid && !params->mac_valid) {
- DP_NOTICE(edev, true,
- "Tried configuring a unicast filter,"
- "but both MAC and VLAN are not set\n");
- return -EINVAL;
- }
-
- memset(&ucast, 0, sizeof(ucast));
- switch (params->type) {
- case QED_FILTER_XCAST_TYPE_ADD:
- ucast.opcode = ECORE_FILTER_ADD;
- break;
- case QED_FILTER_XCAST_TYPE_DEL:
- ucast.opcode = ECORE_FILTER_REMOVE;
- break;
- case QED_FILTER_XCAST_TYPE_REPLACE:
- ucast.opcode = ECORE_FILTER_REPLACE;
- break;
- default:
- DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
- params->type);
- }
-
- if (params->vlan_valid && params->mac_valid) {
- ucast.type = ECORE_FILTER_MAC_VLAN;
- ether_addr_copy((struct ether_addr *)&params->mac,
- (struct ether_addr *)&ucast.mac);
- ucast.vlan = params->vlan;
- } else if (params->mac_valid) {
- ucast.type = ECORE_FILTER_MAC;
- ether_addr_copy((struct ether_addr *)&params->mac,
- (struct ether_addr *)&ucast.mac);
- } else {
- ucast.type = ECORE_FILTER_VLAN;
- ucast.vlan = params->vlan;
- }
-
- ucast.is_rx_filter = true;
- ucast.is_tx_filter = true;
-
- return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
-}
-
-static int
-qed_configure_filter_mcast(struct ecore_dev *edev,
- struct qed_filter_mcast_params *params)
-{
- struct ecore_filter_mcast mcast;
- int i;
-
- memset(&mcast, 0, sizeof(mcast));
- switch (params->type) {
- case QED_FILTER_XCAST_TYPE_ADD:
- mcast.opcode = ECORE_FILTER_ADD;
- break;
- case QED_FILTER_XCAST_TYPE_DEL:
- mcast.opcode = ECORE_FILTER_REMOVE;
- break;
- default:
- DP_NOTICE(edev, true, "Unknown multicast filter type %d\n",
- params->type);
- }
-
- mcast.num_mc_addrs = params->num;
- for (i = 0; i < mcast.num_mc_addrs; i++)
- ether_addr_copy((struct ether_addr *)&params->mac[i],
- (struct ether_addr *)&mcast.mac[i]);
-
- return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
-}
-
-int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type)
{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_filter_accept_flags flags;
memset(&flags, 0, sizeof(flags));
@@ -422,25 +347,6 @@ int qed_configure_filter_rx_mode(struct ecore_dev *edev,
ECORE_SPQ_MODE_CB, NULL);
}
-static int
-qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params)
-{
- switch (params->type) {
- case QED_FILTER_TYPE_UCAST:
- return qed_configure_filter_ucast(edev, &params->filter.ucast);
- case QED_FILTER_TYPE_MCAST:
- return qed_configure_filter_mcast(edev, &params->filter.mcast);
- case QED_FILTER_TYPE_RX_MODE:
- return qed_configure_filter_rx_mode(edev,
- params->filter.
- accept_flags);
- default:
- DP_NOTICE(edev, true, "Unknown filter type %d\n",
- (int)params->type);
- return -EINVAL;
- }
-}
-
static const struct qed_eth_ops qed_eth_ops_pass = {
INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
@@ -455,7 +361,6 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
- INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
};
const struct qed_eth_ops *qed_get_eth_ops(void)
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index 33655c39..ef4a4b55 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -26,12 +26,6 @@ enum qed_filter_rx_mode_type {
QED_FILTER_RX_MODE_TYPE_PROMISC,
};
-enum qed_filter_xcast_params_type {
- QED_FILTER_XCAST_TYPE_ADD,
- QED_FILTER_XCAST_TYPE_DEL,
- QED_FILTER_XCAST_TYPE_REPLACE,
-};
-
enum qed_filter_type {
QED_FILTER_TYPE_UCAST,
QED_FILTER_TYPE_MCAST,
@@ -93,31 +87,6 @@ struct qed_stop_txq_params {
uint8_t tx_queue_id;
};
-struct qed_filter_ucast_params {
- enum qed_filter_xcast_params_type type;
- uint8_t vlan_valid;
- uint16_t vlan;
- uint8_t mac_valid;
- unsigned char mac[ETHER_ADDR_LEN];
-};
-
-struct qed_filter_mcast_params {
- enum qed_filter_xcast_params_type type;
- uint8_t num;
- unsigned char mac[QEDE_MAX_MCAST_FILTERS][ETHER_ADDR_LEN];
-};
-
-union qed_filter_type_params {
- enum qed_filter_rx_mode_type accept_flags;
- struct qed_filter_ucast_params ucast;
- struct qed_filter_mcast_params mcast;
-};
-
-struct qed_filter_params {
- enum qed_filter_type type;
- union qed_filter_type_params filter;
-};
-
struct qed_eth_ops {
const struct qed_common_ops *common;
@@ -162,18 +131,15 @@ struct qed_eth_ops {
void (*get_vport_stats)(struct ecore_dev *edev,
struct ecore_eth_stats *stats);
-
- int (*filter_config)(struct ecore_dev *edev,
- struct qed_filter_params *params);
};
/* externs */
extern const struct qed_common_ops qed_common_ops_pass;
-const struct qed_eth_ops *qed_get_eth_ops();
+const struct qed_eth_ops *qed_get_eth_ops(void);
-int qed_configure_filter_rx_mode(struct ecore_dev *edev,
+int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type);
#endif /* _QEDE_ETH_IF_H */
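Besides removing the now-unused filter structs, the header change above tightens qed_get_eth_ops() to take (void). In C, an empty parameter list in a declaration means "unspecified arguments", so calls are not checked; (void) means "no arguments". A short sketch (names are hypothetical):

#include <stdio.h>

const char *name_old();      /* unchecked: name_old(1, 2) would still compile */
const char *name_new(void);  /* checked: extra arguments are rejected */

const char *name_old(void) { return "old"; }
const char *name_new(void) { return "new"; }

int main(void)
{
	printf("%s %s\n", name_old(), name_new());
	return 0;
}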
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index d106dd0f..6d6fb9de 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -223,47 +223,181 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
}
+static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+ memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+ ucast->is_rx_filter = true;
+ ucast->is_tx_filter = true;
+ /* ucast->assert_on_error = true; - For debug */
+}
+
static int
-qede_set_ucast_rx_mac(struct qede_dev *qdev,
- enum qed_filter_xcast_params_type opcode,
- uint8_t mac[ETHER_ADDR_LEN])
+qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add)
{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_filter_params filter_cmd;
-
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.mac_valid = 1;
- rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
- return qdev->ops->filter_config(edev, &filter_cmd);
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_ucast_entry *tmp = NULL;
+ struct qede_ucast_entry *u;
+ struct ether_addr *mac_addr;
+
+ mac_addr = (struct ether_addr *)ucast->mac;
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+ if ((memcmp(mac_addr, &tmp->mac,
+ ETHER_ADDR_LEN) == 0) &&
+ ucast->vlan == tmp->vlan) {
+ DP_ERR(edev, "Unicast MAC is already added"
+ " with vlan = %u, vni = %u\n",
+ ucast->vlan, ucast->vni);
+ return -EEXIST;
+ }
+ }
+ u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!u) {
+ DP_ERR(edev, "Did not allocate memory for ucast\n");
+ return -ENOMEM;
+ }
+ ether_addr_copy(mac_addr, &u->mac);
+ u->vlan = ucast->vlan;
+ SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
+ qdev->num_uc_addr++;
+ } else {
+ SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+ if ((memcmp(mac_addr, &tmp->mac,
+ ETHER_ADDR_LEN) == 0) &&
+ ucast->vlan == tmp->vlan)
+ break;
+ }
+ if (tmp == NULL) {
+ DP_INFO(edev, "Unicast MAC is not found\n");
+ return -EINVAL;
+ }
+ SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
+ qdev->num_uc_addr--;
+ }
+
+ return 0;
}
-static void
-qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+static int
+qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
+ bool add)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- int rc;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ether_addr *mac_addr;
+ struct qede_mcast_entry *tmp = NULL;
+ struct qede_mcast_entry *m;
+
+ mac_addr = (struct ether_addr *)mcast->mac;
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
+ DP_ERR(edev,
+ "Multicast MAC is already added\n");
+ return -EEXIST;
+ }
+ }
+ m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!m) {
+ DP_ERR(edev,
+ "Did not allocate memory for mcast\n");
+ return -ENOMEM;
+ }
+ ether_addr_copy(mac_addr, &m->mac);
+ SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
+ qdev->num_mc_addr++;
+ } else {
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
+ break;
+ }
+ if (tmp == NULL) {
+ DP_INFO(edev, "Multicast mac is not found\n");
+ return -EINVAL;
+ }
+ SLIST_REMOVE(&qdev->mc_list_head, tmp,
+ qede_mcast_entry, list);
+ qdev->num_mc_addr--;
+ }
- PMD_INIT_FUNC_TRACE(edev);
+ return 0;
+}
- if (index >= qdev->dev_info.num_mac_addrs) {
- DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
- index, qdev->dev_info.num_mac_addrs);
- return;
+static enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc;
+ struct ecore_filter_mcast mcast;
+ struct qede_mcast_entry *tmp;
+ uint16_t j = 0;
+
+ /* Multicast */
+ if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
+ if (add) {
+ if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
+ DP_ERR(edev,
+ "Mcast filter table limit exceeded, "
+ "Please enable mcast promisc mode\n");
+ return -ECORE_INVAL;
+ }
+ }
+ rc = qede_mcast_filter(eth_dev, ucast, add);
+ if (rc == 0) {
+ DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.num_mc_addrs = qdev->num_mc_addr;
+ mcast.opcode = ECORE_FILTER_ADD;
+ SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+ ether_addr_copy(&tmp->mac,
+ (struct ether_addr *)&mcast.mac[j]);
+ j++;
+ }
+ rc = ecore_filter_mcast_cmd(edev, &mcast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to add multicast filter"
+ " rc = %d, op = %d\n", rc, add);
+ }
+ } else { /* Unicast */
+ if (add) {
+ if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+ DP_ERR(edev,
+ "Ucast filter table limit exceeded,"
+ " Please enable promisc mode\n");
+ return -ECORE_INVAL;
+ }
+ }
+ rc = qede_ucast_filter(eth_dev, ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+ rc, add);
+ }
}
- /* Adding macaddr even though promiscuous mode is set */
- if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
- DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
+ return rc;
+}
+
+static void
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+ uint32_t index, __rte_unused uint32_t pool)
+{
+ struct ecore_filter_ucast ucast;
- /* Add MAC filters according to the unicast secondary macs */
- rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
- mac_addr->addr_bytes);
- if (rc)
- DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.type = ECORE_FILTER_MAC;
+ ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+ (void)qede_mac_int_ops(eth_dev, &ucast, 1);
}
static void
@@ -272,6 +406,7 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ether_addr mac_addr;
+ struct ecore_filter_ucast ucast;
int rc;
PMD_INIT_FUNC_TRACE(edev);
@@ -282,12 +417,15 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
return;
}
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_MAC;
+
/* Use the index maintained by rte */
- ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
- rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
- mac_addr.addr_bytes);
- if (rc)
- DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
+ ether_addr_copy(&eth_dev->data->mac_addrs[index],
+ (struct ether_addr *)&ucast.mac);
+
+ ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}
static void
@@ -295,6 +433,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_ucast ucast;
int rc;
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
@@ -306,10 +445,13 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
}
/* First remove the primary mac */
- rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
- qdev->primary_mac.addr_bytes);
-
- if (rc) {
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_MAC;
+ ether_addr_copy(&qdev->primary_mac,
+ (struct ether_addr *)&ucast.mac);
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != 0) {
DP_ERR(edev, "Unable to remove current macaddr"
" Reverting to previous default mac\n");
ether_addr_copy(&qdev->primary_mac,
@@ -318,18 +460,15 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
}
/* Add new MAC */
- rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
- mac_addr->addr_bytes);
-
- if (rc)
+ ucast.opcode = ECORE_FILTER_ADD;
+ ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != 0)
DP_ERR(edev, "Unable to add new default mac\n");
else
ether_addr_copy(mac_addr, &qdev->primary_mac);
}
-
-
-
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
{
struct ecore_dev *edev = &qdev->edev;
@@ -415,22 +554,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}
-static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
- enum qed_filter_xcast_params_type opcode,
- uint16_t vid)
-{
- struct qed_filter_params filter_cmd;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.vlan_valid = 1;
- filter_cmd.filter.ucast.vlan = vid;
-
- return qdev->ops->filter_config(edev, &filter_cmd);
-}
-
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
@@ -439,6 +562,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
struct qed_dev_eth_info *dev_info = &qdev->dev_info;
struct qede_vlan_entry *tmp = NULL;
struct qede_vlan_entry *vlan;
+ struct ecore_filter_ucast ucast;
int rc;
if (on) {
@@ -465,9 +589,13 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
return -ENOMEM;
}
- rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
- vlan_id);
- if (rc) {
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_ADD;
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = vlan_id;
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+ NULL);
+ if (rc != 0) {
DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
rc);
rte_free(vlan);
@@ -497,9 +625,13 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry, list);
- rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
- vlan_id);
- if (rc) {
+ qede_set_ucast_cmn_params(&ucast);
+ ucast.opcode = ECORE_FILTER_REMOVE;
+ ucast.type = ECORE_FILTER_VLAN;
+ ucast.vlan = vlan_id;
+ rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+ NULL);
+ if (rc != 0) {
DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
vlan_id, rc);
} else {
@@ -742,22 +874,6 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
return ((curr->link_status == link.link_up) ? -1 : 0);
}
-static void
-qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type accept_flags)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- struct qed_filter_params rx_mode;
-
- DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
-
- memset(&rx_mode, 0, sizeof(struct qed_filter_params));
- rx_mode.type = QED_FILTER_TYPE_RX_MODE;
- rx_mode.filter.accept_flags = accept_flags;
- qdev->ops->filter_config(edev, &rx_mode);
-}
-
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -770,7 +886,7 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
- qede_rx_mode_setting(eth_dev, type);
+ qed_configure_filter_rx_mode(eth_dev, type);
}
static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
@@ -781,10 +897,11 @@ static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
PMD_INIT_FUNC_TRACE(edev);
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
- qede_rx_mode_setting(eth_dev,
- QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
else
- qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_REGULAR);
}
static void qede_poll_sp_sb_cb(void *param)
@@ -853,6 +970,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
struct ecore_dev *edev = &qdev->edev;
struct ecore_eth_stats stats;
unsigned int i = 0, j = 0, qid;
+ unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
qdev->ops->get_vport_stats(edev, &stats);
@@ -886,6 +1004,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
eth_stats->oerrors = stats.tx_err_drop_pkts;
/* Queue stats */
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+ "Not all the queue stats will be displayed. Set"
+ " RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
+ " appropriately and retry.\n");
+
for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
eth_stats->q_ipackets[i] =
@@ -904,7 +1033,11 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
rx_alloc_errors));
i++;
}
+ if (i == rxq_stat_cntrs)
+ break;
+ }
+ for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
txq = qdev->fp_array[(qid)].txqs[0];
eth_stats->q_opackets[j] =
@@ -914,13 +1047,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
xmit_pkts)));
j++;
}
+ if (j == txq_stat_cntrs)
+ break;
}
}
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev));
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
@@ -930,6 +1067,7 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (xstats_names != NULL) {
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
@@ -940,7 +1078,9 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
- for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
snprintf(xstats_names[stat_idx].name,
sizeof(xstats_names[stat_idx].name),
@@ -964,6 +1104,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
struct ecore_eth_stats stats;
const unsigned int num = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
+ unsigned int rxq_stat_cntrs;
if (n < num)
return num;
@@ -973,15 +1114,19 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
qede_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
stat_idx++;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+ rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (qid = 0; qid < rxq_stat_cntrs; qid++) {
if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
((char *)(qdev->fp_array[(qid)].rxq)) +
qede_rxq_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
stat_idx++;
}
}
@@ -1042,15 +1187,17 @@ static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev)
if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
type |= QED_FILTER_RX_MODE_TYPE_PROMISC;
- qede_rx_mode_setting(eth_dev, type);
+ qed_configure_filter_rx_mode(eth_dev, type);
}
static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
- qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_PROMISC);
else
- qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+ qed_configure_filter_rx_mode(eth_dev,
+ QED_FILTER_RX_MODE_TYPE_REGULAR);
}
static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
@@ -1424,6 +1571,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ /* @DPDK */
+ edev->vendor_id = pci_dev->id.vendor_id;
+ edev->device_id = pci_dev->id.device_id;
+
qed_ops = qed_get_eth_ops();
if (!qed_ops) {
DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index a97e3d96..a35ea8bd 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -123,6 +123,17 @@ struct qede_vlan_entry {
uint16_t vid;
};
+struct qede_mcast_entry {
+ struct ether_addr mac;
+ SLIST_ENTRY(qede_mcast_entry) list;
+};
+
+struct qede_ucast_entry {
+ struct ether_addr mac;
+ uint16_t vlan;
+ SLIST_ENTRY(qede_ucast_entry) list;
+};
+
/*
* Structure to store private data for each port.
*/
@@ -147,6 +158,10 @@ struct qede_dev {
uint16_t configured_vlans;
bool accept_any_vlan;
struct ether_addr primary_mac;
+ SLIST_HEAD(mc_list_head, qede_mcast_entry) mc_list_head;
+ uint16_t num_mc_addr;
+ SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
+ uint16_t num_uc_addr;
bool handle_hw_err;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
};
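The new uc_list_head/mc_list_head fields let the driver keep a shadow list of configured MAC filters so duplicates are rejected and the table can be replayed. A minimal sys/queue.h sketch of that bookkeeping, using plain malloc and a 6-byte array instead of the DPDK allocator and ether_addr type:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct ucast_entry {
	unsigned char mac[6];
	unsigned short vlan;
	SLIST_ENTRY(ucast_entry) list;
};

SLIST_HEAD(uc_head, ucast_entry);

/* Add a MAC+VLAN pair unless it is already present (mirrors -EEXIST above). */
static int ucast_add(struct uc_head *head, const unsigned char *mac,
		     unsigned short vlan)
{
	struct ucast_entry *tmp, *u;

	SLIST_FOREACH(tmp, head, list)
		if (!memcmp(mac, tmp->mac, 6) && tmp->vlan == vlan)
			return -1;

	u = malloc(sizeof(*u));
	if (!u)
		return -2;
	memcpy(u->mac, mac, 6);
	u->vlan = vlan;
	SLIST_INSERT_HEAD(head, u, list);
	return 0;
}

int main(void)
{
	struct uc_head head = SLIST_HEAD_INITIALIZER(head);
	unsigned char mac[6] = { 0x52, 0x54, 0x00, 0x00, 0x00, 0x01 };

	printf("first add:  %d\n", ucast_add(&head, mac, 100));
	printf("second add: %d\n", ucast_add(&head, mac, 100));
	return 0;
}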
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index ab22409a..b666e1c7 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -137,6 +137,7 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
if (fstat(fd, &st) < 0) {
DP_NOTICE(edev, false, "Can't stat firmware file\n");
+ close(fd);
return -1;
}
@@ -158,9 +159,11 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
if (edev->fw_len < 104) {
DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
edev->fw_len);
+ close(fd);
return -EINVAL;
}
+ close(fd);
return 0;
}
#endif
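The qede_main.c hunk above plugs a descriptor leak: every early return taken after a successful open() must close the fd. A hedged sketch of the same fix expressed with a single cleanup path; the file name and the size check are just examples:

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static int load_file_size(const char *path, long *size)
{
	struct stat st;
	int fd, ret = -1;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	if (fstat(fd, &st) < 0)
		goto out;       /* previously a leaking early return */
	if (st.st_size < 104)
		goto out;       /* invalid size must also close the fd */

	*size = (long)st.st_size;
	ret = 0;
out:
	close(fd);
	return ret;
}

int main(void)
{
	long size;

	if (load_file_size("/etc/hostname", &size) == 0)
		printf("size = %ld\n", size);
	return 0;
}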
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 2e181c8c..a34b6659 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -435,13 +435,15 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
uint32_t num_sbs;
- int rc, i;
+ uint16_t i;
+ uint16_t sb_idx;
+ int rc;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
else
- num_sbs = (ecore_cxt_get_proto_cid_count
- (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL)) / 2;
+ num_sbs = ecore_cxt_get_proto_cid_count
+ (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
if (num_sbs == 0) {
DP_ERR(edev, "No status blocks available\n");
@@ -459,7 +461,11 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
fp = &qdev->fp_array[i];
- if (qede_alloc_mem_sb(qdev, fp->sb_info, i % num_sbs)) {
+ if (IS_VF(edev))
+ sb_idx = i % num_sbs;
+ else
+ sb_idx = i;
+ if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
qede_free_fp_arrays(qdev);
return -ENOMEM;
}
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 766d4ef1..328dde08 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -115,9 +115,6 @@ struct pmd_internal {
char *dev_name;
char *iface_name;
uint16_t max_queues;
- uint64_t flags;
-
- volatile uint16_t once;
};
struct internal_list {
@@ -324,6 +321,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
*(uint64_t *)(((char *)vq)
+ vhost_rxport_stat_strings[t].offset);
}
+ xstats[count].id = count;
count++;
}
for (t = 0; t < VHOST_NB_XSTATS_TXPORT; t++) {
@@ -336,6 +334,7 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
*(uint64_t *)(((char *)vq)
+ vhost_txport_stat_strings[t].offset);
}
+ xstats[count].id = count;
count++;
}
return count;
@@ -774,35 +773,40 @@ vhost_driver_session_stop(void)
}
static int
-eth_dev_start(struct rte_eth_dev *dev)
+eth_dev_start(struct rte_eth_dev *dev __rte_unused)
{
- struct pmd_internal *internal = dev->data->dev_private;
- int ret = 0;
-
- if (rte_atomic16_cmpset(&internal->once, 0, 1)) {
- ret = rte_vhost_driver_register(internal->iface_name,
- internal->flags);
- if (ret)
- return ret;
- }
-
- /* We need only one message handling thread */
- if (rte_atomic16_add_return(&nb_started_ports, 1) == 1)
- ret = vhost_driver_session_start();
+ return 0;
+}
- return ret;
+static void
+eth_dev_stop(struct rte_eth_dev *dev __rte_unused)
+{
}
static void
-eth_dev_stop(struct rte_eth_dev *dev)
+eth_dev_close(struct rte_eth_dev *dev)
{
- struct pmd_internal *internal = dev->data->dev_private;
+ struct pmd_internal *internal;
+ struct internal_list *list;
+
+ internal = dev->data->dev_private;
+ if (!internal)
+ return;
- if (rte_atomic16_cmpset(&internal->once, 1, 0))
- rte_vhost_driver_unregister(internal->iface_name);
+ rte_vhost_driver_unregister(internal->iface_name);
- if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
- vhost_driver_session_stop();
+ list = find_internal_resource(internal->iface_name);
+ if (!list)
+ return;
+
+ pthread_mutex_lock(&internal_list_lock);
+ TAILQ_REMOVE(&internal_list, list, next);
+ pthread_mutex_unlock(&internal_list_lock);
+ rte_free(list);
+
+ free(internal->dev_name);
+ free(internal->iface_name);
+ rte_free(internal);
}
static int
@@ -973,6 +977,7 @@ rte_eth_vhost_feature_get(void)
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
+ .dev_close = eth_dev_close,
.dev_configure = eth_dev_configure,
.dev_infos_get = eth_dev_info,
.rx_queue_setup = eth_rx_queue_setup,
@@ -1046,7 +1051,6 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
internal->iface_name = strdup(iface_name);
if (internal->iface_name == NULL)
goto error;
- internal->flags = flags;
list->eth_dev = eth_dev;
pthread_mutex_lock(&internal_list_lock);
@@ -1081,6 +1085,15 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
eth_dev->rx_pkt_burst = eth_vhost_rx;
eth_dev->tx_pkt_burst = eth_vhost_tx;
+ if (rte_vhost_driver_register(iface_name, flags))
+ goto error;
+
+ /* We need only one message handling thread */
+ if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
+ if (vhost_driver_session_start())
+ goto error;
+ }
+
return data->port_id;
error:
@@ -1192,8 +1205,6 @@ static int
rte_pmd_vhost_remove(const char *name)
{
struct rte_eth_dev *eth_dev = NULL;
- struct pmd_internal *internal;
- struct internal_list *list;
unsigned int i;
RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
@@ -1203,27 +1214,16 @@ rte_pmd_vhost_remove(const char *name)
if (eth_dev == NULL)
return -ENODEV;
- internal = eth_dev->data->dev_private;
- if (internal == NULL)
- return -ENODEV;
-
- list = find_internal_resource(internal->iface_name);
- if (list == NULL)
- return -ENODEV;
+ eth_dev_stop(eth_dev);
- pthread_mutex_lock(&internal_list_lock);
- TAILQ_REMOVE(&internal_list, list, next);
- pthread_mutex_unlock(&internal_list_lock);
- rte_free(list);
+ eth_dev_close(eth_dev);
- eth_dev_stop(eth_dev);
+ if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
+ vhost_driver_session_stop();
rte_free(vring_states[eth_dev->data->port_id]);
vring_states[eth_dev->data->port_id] = NULL;
- free(internal->dev_name);
- free(internal->iface_name);
-
for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
rte_free(eth_dev->data->rx_queues[i]);
for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
@@ -1231,7 +1231,6 @@ rte_pmd_vhost_remove(const char *name)
rte_free(eth_dev->data->mac_addrs);
rte_free(eth_dev->data);
- rte_free(internal);
rte_eth_dev_release_port(eth_dev);
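The vhost PMD rework above moves driver registration to device create and teardown to close/remove, while a shared counter still ensures only one message-handling thread exists. A small sketch of that last-one-out pattern using C11 atomics; start_session()/stop_session() are placeholders for the vhost driver session calls:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int nb_started_ports;

static void start_session(void) { puts("session start"); }
static void stop_session(void)  { puts("session stop"); }

static void port_create(void)
{
	/* first port to come up starts the single session thread */
	if (atomic_fetch_add(&nb_started_ports, 1) + 1 == 1)
		start_session();
}

static void port_remove(void)
{
	/* last port to go away stops it */
	if (atomic_fetch_sub(&nb_started_ports, 1) - 1 == 0)
		stop_session();
}

int main(void)
{
	port_create();  /* starts the session */
	port_create();  /* no-op */
	port_remove();  /* no-op */
	port_remove();  /* stops the session */
	return 0;
}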
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 079fd6c8..f5961ab7 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -152,6 +152,8 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \
sizeof(rte_virtio_txq_stat_strings[0]))
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
static int
virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
@@ -360,7 +362,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
* Read the virtqueue size from the Queue Size field
* Always power of 2 and if 0 virtqueue does not exist
*/
- vq_size = hw->vtpci_ops->get_queue_num(hw, vtpci_queue_idx);
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
@@ -519,7 +521,7 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
}
}
- if (hw->vtpci_ops->setup_queue(hw, vq) < 0) {
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
return -EINVAL;
}
@@ -893,6 +895,7 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
xstats[count].value = *(uint64_t *)(((char *)rxvq) +
rte_virtio_rxq_stat_strings[t].offset);
+ xstats[count].id = count;
count++;
}
}
@@ -908,6 +911,7 @@ virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
xstats[count].value = *(uint64_t *)(((char *)txvq) +
rte_virtio_txq_stat_strings[t].offset);
+ xstats[count].id = count;
count++;
}
}
@@ -1114,7 +1118,7 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
req_features);
/* Read device(host) feature bits */
- host_features = hw->vtpci_ops->get_features(hw);
+ host_features = VTPCI_OPS(hw)->get_features(hw);
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
host_features);
@@ -1204,14 +1208,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
/* If host does not support status then disable LSC */
if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
else
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
-
rx_func_get(eth_dev);
/* Setting up rx_header size for the device */
@@ -1287,6 +1291,52 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
}
/*
+ * Remap the PCI device again (IO port map for legacy device and
+ * memory map for modern device), so that the secondary process
+ * could have the PCI initiated correctly.
+ */
+static int
+virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
+{
+ if (hw->modern) {
+ /*
+ * We don't have to re-parse the PCI config space, since
+ * rte_eal_pci_map_device() makes sure the mapped address
+ * in secondary process would equal to the one mapped in
+ * the primary process: error will be returned if that
+ * requirement is not met.
+ *
+ * That said, we could simply reuse all cap pointers
+ * (such as dev_cfg, common_cfg, etc.) parsed from the
+ * primary process, which is stored in shared memory.
+ */
+ if (rte_eal_pci_map_device(pci_dev)) {
+ PMD_INIT_LOG(DEBUG, "failed to map pci device!");
+ return -1;
+ }
+ } else {
+ if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+virtio_set_vtpci_ops(struct virtio_hw *hw)
+{
+#ifdef RTE_VIRTIO_USER
+ if (hw->virtio_user_dev)
+ VTPCI_OPS(hw) = &virtio_user_ops;
+ else
+#endif
+ if (hw->modern)
+ VTPCI_OPS(hw) = &modern_ops;
+ else
+ VTPCI_OPS(hw) = &legacy_ops;
+}
+
+/*
* This function is based on probe() function in virtio_pci.c
* It returns 0 on success.
*/
@@ -1304,7 +1354,19 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
- rx_func_get(eth_dev);
+ if (!hw->virtio_user_dev) {
+ ret = virtio_remap_pci(eth_dev->pci_dev, hw);
+ if (ret)
+ return ret;
+ }
+
+ virtio_set_vtpci_ops(hw);
+ if (hw->use_simple_rxtx) {
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else {
+ rx_func_get(eth_dev);
+ }
return 0;
}
@@ -1318,6 +1380,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
}
pci_dev = eth_dev->pci_dev;
+ hw->port_id = eth_dev->data->port_id;
if (pci_dev) {
ret = vtpci_init(pci_dev, hw, &dev_flags);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 27d9a190..4feccf93 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -70,6 +70,11 @@
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
+#define VIRTIO_PMD_SUPPORTED_GUEST_FEATURES \
+ (VIRTIO_PMD_DEFAULT_GUEST_FEATURES | \
+ 1u << VIRTIO_NET_F_GUEST_CSUM | \
+ 1u << VIRTIO_NET_F_GUEST_TSO4 | \
+ 1u << VIRTIO_NET_F_GUEST_TSO6)
/*
* CQ function prototype
*/
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 9b47165d..8d5355c7 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -92,17 +92,17 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
while (length > 0) {
if (length >= 4) {
size = 4;
- rte_eal_pci_ioport_read(&hw->io, dst, size,
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
} else if (length >= 2) {
size = 2;
- rte_eal_pci_ioport_read(&hw->io, dst, size,
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
} else {
size = 1;
- rte_eal_pci_ioport_read(&hw->io, dst, size,
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
}
@@ -111,7 +111,7 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
- rte_eal_pci_ioport_read(&hw->io, dst, length,
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
@@ -131,16 +131,16 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
if (length >= 4) {
size = 4;
tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
- rte_eal_pci_ioport_write(&hw->io, &tmp.u32, size,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
VIRTIO_PCI_CONFIG(hw) + offset);
} else if (length >= 2) {
size = 2;
tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
- rte_eal_pci_ioport_write(&hw->io, &tmp.u16, size,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
VIRTIO_PCI_CONFIG(hw) + offset);
} else {
size = 1;
- rte_eal_pci_ioport_write(&hw->io, src, size,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
VIRTIO_PCI_CONFIG(hw) + offset);
}
@@ -149,7 +149,7 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
- rte_eal_pci_ioport_write(&hw->io, src, length,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
@@ -159,7 +159,8 @@ legacy_get_features(struct virtio_hw *hw)
{
uint32_t dst;
- rte_eal_pci_ioport_read(&hw->io, &dst, 4, VIRTIO_PCI_HOST_FEATURES);
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
+ VIRTIO_PCI_HOST_FEATURES);
return dst;
}
@@ -171,7 +172,7 @@ legacy_set_features(struct virtio_hw *hw, uint64_t features)
"only 32 bit features are allowed for legacy virtio!");
return;
}
- rte_eal_pci_ioport_write(&hw->io, &features, 4,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
VIRTIO_PCI_GUEST_FEATURES);
}
@@ -180,14 +181,14 @@ legacy_get_status(struct virtio_hw *hw)
{
uint8_t dst;
- rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_STATUS);
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
return dst;
}
static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
- rte_eal_pci_ioport_write(&hw->io, &status, 1, VIRTIO_PCI_STATUS);
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}
static void
@@ -201,7 +202,7 @@ legacy_get_isr(struct virtio_hw *hw)
{
uint8_t dst;
- rte_eal_pci_ioport_read(&hw->io, &dst, 1, VIRTIO_PCI_ISR);
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
return dst;
}
@@ -211,8 +212,10 @@ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
uint16_t dst;
- rte_eal_pci_ioport_write(&hw->io, &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
- rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
+ VIRTIO_MSI_CONFIG_VECTOR);
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
+ VIRTIO_MSI_CONFIG_VECTOR);
return dst;
}
@@ -221,8 +224,9 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
uint16_t dst;
- rte_eal_pci_ioport_write(&hw->io, &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
- rte_eal_pci_ioport_read(&hw->io, &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
return dst;
}
@@ -234,10 +238,10 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
if (!check_vq_phys_addr_ok(vq))
return -1;
- rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
- rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
return 0;
}
@@ -247,15 +251,15 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint32_t src = 0;
- rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_SEL);
- rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN);
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2,
+ rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
VIRTIO_PCI_QUEUE_NOTIFY);
}
@@ -289,7 +293,7 @@ static int
legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
struct virtio_hw *hw, uint32_t *dev_flags)
{
- if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0)
+ if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
return -1;
if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
@@ -300,7 +304,7 @@ legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
return 0;
}
-static const struct virtio_pci_ops legacy_ops = {
+const struct virtio_pci_ops legacy_ops = {
.read_dev_cfg = legacy_read_dev_config,
.write_dev_cfg = legacy_write_dev_config,
.reset = legacy_reset,
@@ -516,7 +520,7 @@ modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
io_write16(1, vq->notify_addr);
}
-static const struct virtio_pci_ops modern_ops = {
+const struct virtio_pci_ops modern_ops = {
.read_dev_cfg = modern_read_dev_config,
.write_dev_cfg = modern_write_dev_config,
.reset = modern_reset,
@@ -537,14 +541,14 @@ void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
void *dst, int length)
{
- hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length);
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}
void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
const void *src, int length)
{
- hw->vtpci_ops->write_dev_cfg(hw, offset, src, length);
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}
uint64_t
@@ -557,7 +561,7 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
* host all support.
*/
features = host_features & hw->guest_features;
- hw->vtpci_ops->set_features(hw, features);
+ VTPCI_OPS(hw)->set_features(hw, features);
return features;
}
@@ -565,9 +569,9 @@ vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
void
vtpci_reset(struct virtio_hw *hw)
{
- hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
/* flush status write */
- hw->vtpci_ops->get_status(hw);
+ VTPCI_OPS(hw)->get_status(hw);
}
void
@@ -580,21 +584,21 @@ void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
if (status != VIRTIO_CONFIG_STATUS_RESET)
- status |= hw->vtpci_ops->get_status(hw);
+ status |= VTPCI_OPS(hw)->get_status(hw);
- hw->vtpci_ops->set_status(hw, status);
+ VTPCI_OPS(hw)->set_status(hw, status);
}
uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
- return hw->vtpci_ops->get_status(hw);
+ return VTPCI_OPS(hw)->get_status(hw);
}
uint8_t
vtpci_isr(struct virtio_hw *hw)
{
- return hw->vtpci_ops->get_isr(hw);
+ return VTPCI_OPS(hw)->get_isr(hw);
}
@@ -602,7 +606,7 @@ vtpci_isr(struct virtio_hw *hw)
uint16_t
vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
{
- return hw->vtpci_ops->set_config_irq(hw, vec);
+ return VTPCI_OPS(hw)->set_config_irq(hw, vec);
}
static void *
@@ -736,8 +740,8 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
*/
if (virtio_read_caps(dev, hw) == 0) {
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
- hw->vtpci_ops = &modern_ops;
- hw->modern = 1;
+ virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
+ hw->modern = 1;
*dev_flags |= RTE_ETH_DEV_INTR_LSC;
return 0;
}
@@ -755,7 +759,7 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
return -1;
}
- hw->vtpci_ops = &legacy_ops;
+ virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
hw->use_msix = legacy_virtio_has_msix(&dev->addr);
hw->modern = 0;
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index de271bfe..511a1c87 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -245,7 +245,6 @@ struct virtio_net_config;
struct virtio_hw {
struct virtnet_ctl *cvq;
- struct rte_pci_ioport io;
uint64_t req_guest_features;
uint64_t guest_features;
uint32_t max_queue_pairs;
@@ -254,6 +253,7 @@ struct virtio_hw {
uint8_t use_msix;
uint8_t modern;
uint8_t use_simple_rxtx;
+ uint8_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
uint32_t notify_off_multiplier;
uint8_t *isr;
@@ -261,12 +261,28 @@ struct virtio_hw {
struct rte_pci_device *dev;
struct virtio_pci_common_cfg *common_cfg;
struct virtio_net_config *dev_cfg;
- const struct virtio_pci_ops *vtpci_ops;
void *virtio_user_dev;
struct virtqueue **vqs;
};
+
+/*
+ * While virtio_hw is stored in shared memory, this structure stores
+ * some infos that may vary in the multiple process model locally.
+ * For example, the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->port_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->port_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
+
+
/*
* This structure is just a reference to read
* net device specific config space; it just a chodu structure
@@ -317,4 +333,8 @@ uint8_t vtpci_isr(struct virtio_hw *);
uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t);
+extern const struct virtio_pci_ops legacy_ops;
+extern const struct virtio_pci_ops modern_ops;
+extern const struct virtio_pci_ops virtio_user_ops;
+
#endif /* _VIRTIO_PCI_H_ */
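The VTPCI_OPS()/VTPCI_IO() indirection above exists because virtio_hw sits in shared memory: a function-pointer table valid in the primary process is meaningless in a secondary one, so each process keeps its own per-port table. A simplified stand-alone sketch of that idea (types and the max-port constant are stand-ins):

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 32

struct hw  { uint8_t port_id; };            /* would live in shared memory */
struct ops { void (*notify)(struct hw *); };

static const struct ops *hw_internal_ops[MAX_PORTS]; /* process-local */
#define OPS(hw) (hw_internal_ops[(hw)->port_id])

static void legacy_notify(struct hw *hw)
{
	printf("notify port %u via legacy ops\n", hw->port_id);
}

static const struct ops legacy = { .notify = legacy_notify };

int main(void)
{
	struct hw hw = { .port_id = 3 };

	OPS(&hw) = &legacy;      /* each process sets this up for itself */
	OPS(&hw)->notify(&hw);
	return 0;
}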
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 22d97a4e..a33ef1a8 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -258,6 +258,12 @@ tx_offload_enabled(struct virtio_hw *hw)
vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
}
+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do { \
+ if ((var) != (val)) \
+ (var) = (val); \
+} while (0)
+
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
uint16_t needed, int use_indirect, int can_push)
@@ -286,8 +292,14 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
hdr = (struct virtio_net_hdr *)
rte_pktmbuf_prepend(cookie, head_size);
/* if offload disabled, it is not zeroed below, do it now */
- if (offload == 0)
- memset(hdr, 0, head_size);
+ if (offload == 0) {
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+ }
} else if (use_indirect) {
/* setup tx ring slot to point to indirect
* descriptor list stored in reserved region.
@@ -337,9 +349,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
break;
default:
- hdr->csum_start = 0;
- hdr->csum_offset = 0;
- hdr->flags = 0;
+ ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
break;
}
@@ -355,9 +367,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
cookie->l3_len +
cookie->l4_len;
} else {
- hdr->gso_type = 0;
- hdr->gso_size = 0;
- hdr->hdr_len = 0;
+ ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
}
}
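The ASSIGN_UNLESS_EQUAL() change above avoids rewriting virtio-net header fields that already hold the target value, so an untouched header does not dirty its cache line on every transmit. A tiny sketch of the macro in use (the struct layout is a simplified stand-in):

#include <stdint.h>
#include <stdio.h>

#define ASSIGN_UNLESS_EQUAL(var, val) do { \
	if ((var) != (val)) \
		(var) = (val); \
} while (0)

struct net_hdr {
	uint8_t flags, gso_type;
	uint16_t csum_start, csum_offset, gso_size;
};

int main(void)
{
	struct net_hdr hdr = { 0 };

	/* No stores happen here: every field is already zero. */
	ASSIGN_UNLESS_EQUAL(hdr.csum_start, 0);
	ASSIGN_UNLESS_EQUAL(hdr.flags, 0);

	/* A real change still writes through. */
	ASSIGN_UNLESS_EQUAL(hdr.gso_size, 1460);
	printf("gso_size = %u\n", hdr.gso_size);
	return 0;
}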
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index e239e0eb..a38398b8 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -148,12 +148,13 @@ virtio_user_start_device(struct virtio_user_dev *dev)
/* Step 1: set features
* Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
- * and VIRTIO_NET_F_MAC is stripped.
+ * VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ is stripped.
*/
features = dev->features;
if (dev->max_queue_pairs > 1)
features |= VHOST_USER_MQ;
features &= ~(1ull << VIRTIO_NET_F_MAC);
+ features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
if (ret < 0)
goto error;
@@ -181,7 +182,17 @@ error:
int virtio_user_stop_device(struct virtio_user_dev *dev)
{
- return vhost_user_sock(dev->vhostfd, VHOST_USER_RESET_OWNER, NULL);
+ uint32_t i;
+
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ close(dev->callfds[i]);
+ close(dev->kickfds[i]);
+ }
+
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+
+ return 0;
}
static inline void
@@ -209,6 +220,8 @@ int
virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
int cq, int queue_size, const char *mac)
{
+ uint32_t i;
+
snprintf(dev->path, PATH_MAX, "%s", path);
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
@@ -217,6 +230,11 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
parse_mac(dev, mac);
dev->vhostfd = -1;
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
+ dev->kickfds[i] = -1;
+ dev->callfds[i] = -1;
+ }
+
dev->vhostfd = vhost_user_setup(dev->path);
if (dev->vhostfd < 0) {
PMD_INIT_LOG(ERR, "backend set up fails");
@@ -228,29 +246,26 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
}
if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
- &dev->features) < 0) {
+ &dev->device_features) < 0) {
PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
return -1;
}
if (dev->mac_specified)
- dev->features |= (1ull << VIRTIO_NET_F_MAC);
+ dev->device_features |= (1ull << VIRTIO_NET_F_MAC);
- if (!cq) {
- dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
- /* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
- dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
- dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
- dev->features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
- dev->features &= ~(1ull << VIRTIO_NET_F_MQ);
- dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
- } else {
- /* vhost user backend does not need to know ctrl-q, so
- * actually we need add this bit into features. However,
- * DPDK vhost-user does send features with this bit, so we
- * check it instead of OR it for now.
+ if (cq) {
+ /* device does not really need to know anything about CQ,
+ * so if necessary, we just claim to support CQ
*/
- if (!(dev->features & (1ull << VIRTIO_NET_F_CTRL_VQ)))
- PMD_INIT_LOG(INFO, "vhost does not support ctrl-q");
+ dev->device_features |= (1ull << VIRTIO_NET_F_CTRL_VQ);
+ } else {
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
+ /* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_RX);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_MQ);
+ dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
if (dev->max_queue_pairs > 1) {
@@ -266,13 +281,6 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
- uint32_t i;
-
- for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
- close(dev->callfds[i]);
- close(dev->kickfds[i]);
- }
-
close(dev->vhostfd);
}
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 33690b5c..28fc788e 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -46,7 +46,10 @@ struct virtio_user_dev {
uint32_t max_queue_pairs;
uint32_t queue_pairs;
uint32_t queue_size;
- uint64_t features;
+ uint64_t features; /* features negotiated with the driver,
+ * kept in sync with the device
+ */
+ uint64_t device_features; /* features supported by the device */
uint8_t status;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 406beeac..013600e4 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -87,21 +87,24 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
}
static void
-virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
+virtio_user_reset(struct virtio_hw *hw)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
- virtio_user_start_device(dev);
- dev->status = status;
+ if (dev->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+ virtio_user_stop_device(dev);
}
static void
-virtio_user_reset(struct virtio_hw *hw)
+virtio_user_set_status(struct virtio_hw *hw, uint8_t status)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- virtio_user_stop_device(dev);
+ if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
+ virtio_user_start_device(dev);
+ else if (status == VIRTIO_CONFIG_STATUS_RESET)
+ virtio_user_reset(hw);
+ dev->status = status;
}
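After the swap above, virtio_user_set_status becomes the single dispatch point: writing DRIVER_OK starts the backend, writing 0 (VIRTIO_CONFIG_STATUS_RESET) tears it down, and virtio_user_reset only stops the device if the driver had actually started it. A reduced model of that contract is sketched below with placeholder types; the status values are the standard virtio ones.

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_CONFIG_STATUS_RESET     0x00
#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04

struct fake_dev {
	uint8_t status;
	int running;
};

static void dev_start(struct fake_dev *d) { d->running = 1; }
static void dev_stop(struct fake_dev *d)  { d->running = 0; }

static void dev_reset(struct fake_dev *d)
{
	/* only stop if the driver had actually started the device */
	if (d->status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		dev_stop(d);
}

static void dev_set_status(struct fake_dev *d, uint8_t status)
{
	if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK)
		dev_start(d);
	else if (status == VIRTIO_CONFIG_STATUS_RESET)
		dev_reset(d);
	d->status = status;	/* updated after dispatch, as in the hunk above */
}

int main(void)
{
	struct fake_dev d = { 0, 0 };

	dev_set_status(&d, VIRTIO_CONFIG_STATUS_DRIVER_OK);
	printf("after DRIVER_OK: running=%d\n", d.running);
	dev_set_status(&d, VIRTIO_CONFIG_STATUS_RESET);
	printf("after RESET:     running=%d\n", d.running);
	return 0;
}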
static uint8_t
@@ -117,7 +120,8 @@ virtio_user_get_features(struct virtio_hw *hw)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- return dev->features;
+ /* unmask feature bits defined in vhost user protocol */
+ return dev->device_features & VIRTIO_PMD_SUPPORTED_GUEST_FEATURES;
}
static void
@@ -125,7 +129,7 @@ virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
{
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
- dev->features = features;
+ dev->features = features & dev->device_features;
}
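Taken together, the two hunks above implement a two-sided negotiation: what the backend offers is first clipped to what the PMD can drive (get_features), and what the driver then requests is clipped back to what the backend offered (set_features). The following standalone sketch shows that flow; the mask constant and type names are stand-ins, not the PMD's real definitions.

#include <stdint.h>
#include <stdio.h>

struct fake_user_dev {
	uint64_t device_features;	/* what the vhost backend offers */
	uint64_t features;		/* what was actually negotiated */
};

/* stand-in for VIRTIO_PMD_SUPPORTED_GUEST_FEATURES */
static const uint64_t PMD_SUPPORTED = 0x00ffffffull;

static uint64_t fake_get_features(const struct fake_user_dev *d)
{
	/* only advertise bits the PMD itself knows how to drive */
	return d->device_features & PMD_SUPPORTED;
}

static void fake_set_features(struct fake_user_dev *d, uint64_t requested)
{
	/* never record a bit the backend did not offer */
	d->features = requested & d->device_features;
}

int main(void)
{
	struct fake_user_dev d = { .device_features = 0x130000007ull };
	uint64_t offered = fake_get_features(&d);

	fake_set_features(&d, offered | (1ull << 40)); /* rogue bit is dropped */
	printf("offered=%#llx negotiated=%#llx\n",
	       (unsigned long long)offered, (unsigned long long)d.features);
	return 0;
}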
static uint8_t
@@ -212,7 +216,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
strerror(errno));
}
-static const struct virtio_pci_ops virtio_user_ops = {
+const struct virtio_pci_ops virtio_user_ops = {
.read_dev_cfg = virtio_user_read_dev_config,
.write_dev_cfg = virtio_user_write_dev_config,
.reset = virtio_user_reset,
@@ -301,7 +305,8 @@ virtio_user_eth_dev_alloc(const char *name)
return NULL;
}
- hw->vtpci_ops = &virtio_user_ops;
+ hw->port_id = data->port_id;
+ virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
hw->use_msix = 0;
hw->modern = 0;
hw->use_simple_rxtx = 0;
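The final hunk of this file stops storing the ops pointer inside struct virtio_hw and instead registers it in a per-port table keyed by port_id, which is what the VTPCI_OPS() accessor used in virtqueue.h below reads back. A reduced model of that indirection, with simplified stand-in types and macro name, looks like this:

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 8

struct fake_hw;	/* forward declaration for the ops signature */

struct fake_pci_ops {
	void (*notify_queue)(struct fake_hw *hw, int vq_idx);
};

struct fake_hw {
	uint16_t port_id;
};

struct fake_hw_internal {
	const struct fake_pci_ops *vtpci_ops;
};

static struct fake_hw_internal fake_internal[MAX_PORTS];

/* same shape as the VTPCI_OPS(hw) accessor used in virtqueue.h */
#define FAKE_OPS(hw) (fake_internal[(hw)->port_id].vtpci_ops)

static void user_notify_queue(struct fake_hw *hw, int vq_idx)
{
	printf("port %u: kick vq %d\n", hw->port_id, vq_idx);
}

static const struct fake_pci_ops fake_user_ops = {
	.notify_queue = user_notify_queue,
};

int main(void)
{
	struct fake_hw hw = { .port_id = 3 };

	fake_internal[hw.port_id].vtpci_ops = &fake_user_ops;
	FAKE_OPS(&hw)->notify_queue(&hw, 0);
	return 0;
}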
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index f0bb0899..b1070e05 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -330,7 +330,7 @@ virtqueue_notify(struct virtqueue *vq)
* For virtio on IA, the notification is through io port operation
* which is a serialization instruction itself.
*/
- vq->hw->vtpci_ops->notify_queue(vq->hw, vq);
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
}
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index b1091688..93db10fb 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -518,6 +518,32 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
+ struct rte_mbuf *mbuf)
+{
+ uint32_t val = 0;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+ struct Vmxnet3_RxDesc *rxd =
+ (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+ vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+ if (ring_id == 0)
+ val = VMXNET3_RXD_BTYPE_HEAD;
+ else
+ val = VMXNET3_RXD_BTYPE_BODY;
+
+ buf_info->m = mbuf;
+ buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+ buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+
+ rxd->addr = buf_info->bufPA;
+ rxd->btype = val;
+ rxd->len = buf_info->len;
+ rxd->gen = ring->gen;
+
+ vmxnet3_cmd_ring_adv_next2fill(ring);
+}
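vmxnet3_renew_desc refreshes exactly one slot: it records the new mbuf in the shadow buf_info entry, copies address, length and buffer type into the hardware descriptor, stamps the ring's current generation bit so the device sees the slot as valid again, and advances next2fill. A cut-down model of that pattern with plain arrays in place of DMA-visible memory follows; all type and field names are simplified stand-ins.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

struct fake_rxd {
	uint64_t addr;
	uint32_t len;
	uint8_t  btype;	/* 0 = head buffer, 1 = body buffer */
	uint8_t  gen;	/* generation bit owned by the ring */
};

struct fake_ring {
	struct fake_rxd desc[RING_SIZE];
	uint32_t next2fill;
	uint8_t  gen;
};

static void fake_renew_desc(struct fake_ring *ring, uint64_t buf_pa,
			    uint32_t buf_len, uint8_t btype)
{
	struct fake_rxd *rxd = &ring->desc[ring->next2fill];

	rxd->addr  = buf_pa;
	rxd->len   = buf_len;
	rxd->btype = btype;
	rxd->gen   = ring->gen;	/* hand the slot back to the device */

	/* advance, flipping the generation bit on wrap-around */
	if (++ring->next2fill == RING_SIZE) {
		ring->next2fill = 0;
		ring->gen ^= 1;
	}
}

int main(void)
{
	struct fake_ring ring = { .next2fill = 0, .gen = 1 };
	uint64_t pa;

	for (pa = 0x1000; pa < 0x1000 + 6 * 0x800; pa += 0x800)
		fake_renew_desc(&ring, pa, 2048, 0);
	printf("next2fill=%u gen=%u\n", ring.next2fill, ring.gen);
	return 0;
}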
/*
* Allocates mbufs and clusters. Post rx descriptors with buffer details
* so that device can receive packets in those buffers.
@@ -657,9 +683,18 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
while (rcd->gen == rxq->comp_ring.gen) {
+ struct rte_mbuf *newm;
+
if (nb_rx >= nb_pkts)
break;
+ newm = rte_mbuf_raw_alloc(rxq->mp);
+ if (unlikely(newm == NULL)) {
+ PMD_RX_LOG(ERR, "Error allocating mbuf");
+ rxq->stats.rx_buf_alloc_failure++;
+ break;
+ }
+
idx = rcd->rxdIdx;
ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
@@ -759,8 +794,8 @@ rcd_done:
VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
rxq->cmd_ring[ring_idx].size);
- /* It's time to allocate some new buf and renew descriptors */
- vmxnet3_post_rx_bufs(rxq, ring_idx);
+ /* It's time to renew descriptors */
+ vmxnet3_renew_desc(rxq, ring_idx, newm);
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
rxq->cmd_ring[ring_idx].next2fill);
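The reworked receive loop allocates the replacement mbuf before it consumes a completion; on allocation failure it breaks out with the old buffer still posted, so nothing is lost under memory pressure, and each consumed descriptor is renewed one-for-one instead of bulk-posting buffers afterwards. A compact model of that allocate-before-consume ordering, with placeholder descriptor and allocator types, is shown below.

#include <stdlib.h>
#include <stdio.h>

struct fake_desc {
	void *buf;
	int   done;
};

/* returns the number of packets handed to the caller */
static int fake_recv(struct fake_desc *ring, int n, void **pkts, int max)
{
	int i, nb_rx = 0;

	for (i = 0; i < n && nb_rx < max && ring[i].done; ++i) {
		void *newbuf = malloc(2048);	/* replacement first */

		if (newbuf == NULL)
			break;			/* keep old buffer posted */

		pkts[nb_rx++] = ring[i].buf;	/* hand filled buffer up */
		ring[i].buf = newbuf;		/* repost the slot immediately */
		ring[i].done = 0;
	}
	return nb_rx;
}

int main(void)
{
	struct fake_desc ring[2] = {
		{ malloc(2048), 1 }, { malloc(2048), 1 },
	};
	void *pkts[2];
	int i, n = fake_recv(ring, 2, pkts, 2);

	printf("received %d packets\n", n);
	for (i = 0; i < n; ++i)
		free(pkts[i]);
	for (i = 0; i < 2; ++i)
		free(ring[i].buf);
	return 0;
}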