Diffstat (limited to 'drivers/net/qede/qede_ethdev.c')
-rw-r--r--  drivers/net/qede/qede_ethdev.c  1137
1 file changed, 892 insertions(+), 245 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6d6fb9de..7501eb20 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -12,9 +12,113 @@
/* Globals */
static const struct qed_eth_ops *qed_ops;
-static const char *drivername = "qede pmd";
static int64_t timer_period = 1;
+/* VXLAN tunnel classification mapping */
+const struct _qede_vxlan_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
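
This table is consumed by qede_get_ecore_tunn_params(), added later in this patch: the combined rte filter bitmask is matched as a whole, so only the listed pairings resolve to a real ecore classification, while rows carrying ECORE_FILTER_UNUSED / MAX_ECORE_TUNN_CLSS are rejected by qede_vxlan_tunn_config(). A minimal illustration of the distinction (not part of the patch; the macros come from rte_eth_ctrl.h):

    /* resolves to ECORE_FILTER_MAC_VNI_PAIR / ECORE_TUNN_CLSS_MAC_VNI */
    uint16_t supported   = ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID;

    /* resolves to ECORE_FILTER_UNUSED and is refused ("Wrong filter type") */
    uint16_t unsupported = ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC;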
+
struct rte_qede_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint64_t offset;
@@ -175,14 +279,14 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
}
static void
-qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+qede_interrupt_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+ if (rte_intr_enable(eth_dev->intr_handle))
DP_ERR(edev, "rte_intr_enable failed\n");
}
@@ -194,6 +298,7 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
qdev->ops = qed_ops;
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -222,6 +327,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, " Firmware file : %s\n", fw_file);
DP_INFO(edev, "*********************************\n");
}
+#endif
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
@@ -231,6 +337,17 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
+ uint8_t clss, bool mode, bool mask)
+{
+ memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
+ p_tunn->vxlan.b_update_mode = mode;
+ p_tunn->vxlan.b_mode_enabled = mask;
+ p_tunn->b_update_rx_cls = true;
+ p_tunn->b_update_tx_cls = true;
+ p_tunn->vxlan.tun_cls = clss;
+}
+
static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
@@ -261,13 +378,15 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
ether_addr_copy(mac_addr, &u->mac);
u->vlan = ucast->vlan;
+ u->vni = ucast->vni;
SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
qdev->num_uc_addr++;
} else {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
- ucast->vlan == tmp->vlan)
+ ucast->vlan == tmp->vlan &&
+ ucast->vni == tmp->vni)
break;
}
if (tmp == NULL) {
@@ -368,7 +487,8 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
} else { /* Unicast */
if (add) {
- if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+ if (qdev->num_uc_addr >=
+ qdev->dev_info.num_mac_filters) {
DP_ERR(edev,
"Ucast filter table limit exceeded,"
" Please enable promisc mode\n");
@@ -388,16 +508,18 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
return rc;
}
-static void
+static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct ecore_filter_ucast ucast;
+ int re;
qede_set_ucast_cmn_params(&ucast);
ucast.type = ECORE_FILTER_MAC;
ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- (void)qede_mac_int_ops(eth_dev, &ucast, 1);
+ re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
+ return re;
}
static void
@@ -405,15 +527,13 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct ether_addr mac_addr;
struct ecore_filter_ucast ucast;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- if (index >= qdev->dev_info.num_mac_addrs) {
+ if (index >= qdev->dev_info.num_mac_filters) {
DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
- index, qdev->dev_info.num_mac_addrs);
+ index, qdev->dev_info.num_mac_filters);
return;
}
@@ -433,8 +553,6 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast;
- int rc;
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
@@ -444,29 +562,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return;
}
- /* First remove the primary mac */
- qede_set_ucast_cmn_params(&ucast);
- ucast.opcode = ECORE_FILTER_REMOVE;
- ucast.type = ECORE_FILTER_MAC;
- ether_addr_copy(&qdev->primary_mac,
- (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0) {
- DP_ERR(edev, "Unable to remove current macaddr"
- " Reverting to previous default mac\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
- }
-
- /* Add new MAC */
- ucast.opcode = ECORE_FILTER_ADD;
- ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0)
- DP_ERR(edev, "Unable to add new default mac\n");
- else
- ether_addr_copy(mac_addr, &qdev->primary_mac);
+ qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
@@ -510,50 +606,11 @@ static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
return rc;
}
+ qdev->vlan_strip_flg = set_stripping;
return 0;
}
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
-
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->hw_vlan_strip)
- (void)qede_vlan_stripping(eth_dev, 1);
- else
- (void)qede_vlan_stripping(eth_dev, 0);
- }
-
- if (mask & ETH_VLAN_FILTER_MASK) {
- /* VLAN filtering kicks in when a VLAN is added */
- if (rxmode->hw_vlan_filter) {
- qede_vlan_filter_set(eth_dev, 0, 1);
- } else {
- if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
- DP_NOTICE(edev, false,
- " Please remove existing VLAN filters"
- " before disabling VLAN filtering\n");
- /* Signal app that VLAN filtering is still
- * enabled
- */
- rxmode->hw_vlan_filter = true;
- } else {
- qede_vlan_filter_set(eth_dev, 0, 0);
- }
- }
- }
-
- if (mask & ETH_VLAN_EXTEND_MASK)
- DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
- " and classification is based on outer tag only\n");
-
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
- mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
-}
-
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
@@ -567,7 +624,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
if (on) {
if (qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_INFO(edev, "Reached max VLAN filter limit"
+ DP_ERR(edev, "Reached max VLAN filter limit"
" enabling accept_any_vlan\n");
qede_config_accept_any_vlan(qdev, true);
return 0;
@@ -644,6 +701,46 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
return rc;
}
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->hw_vlan_strip)
+ (void)qede_vlan_stripping(eth_dev, 1);
+ else
+ (void)qede_vlan_stripping(eth_dev, 0);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filtering kicks in when a VLAN is added */
+ if (rxmode->hw_vlan_filter) {
+ qede_vlan_filter_set(eth_dev, 0, 1);
+ } else {
+ if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+ DP_ERR(edev,
+ " Please remove existing VLAN filters"
+ " before disabling VLAN filtering\n");
+ /* Signal app that VLAN filtering is still
+ * enabled
+ */
+ rxmode->hw_vlan_filter = true;
+ } else {
+ qede_vlan_filter_set(eth_dev, 0, 0);
+ }
+ }
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
+ " and classification is based on outer tag only\n");
+
+ DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
+ mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+}
+
static int qede_init_vport(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -651,7 +748,7 @@ static int qede_init_vport(struct qede_dev *qdev)
int rc;
start.remove_inner_vlan = 1;
- start.gro_enable = 0;
+ start.enable_lro = qdev->enable_lro;
start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
start.vport_id = 0;
start.drop_ttl0 = false;
@@ -671,12 +768,62 @@ static int qede_init_vport(struct qede_dev *qdev)
return 0;
}
+static void qede_prandom_bytes(uint32_t *buff)
+{
+ uint8_t i;
+
+ srand((unsigned int)time(NULL));
+ for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
+ buff[i] = rand();
+}
+
+int qede_config_rss(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+ uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[2];
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, id, pos, q;
+
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (!rss_conf.rss_key) {
+ DP_INFO(edev, "Applying driver default key\n");
+ rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ qede_prandom_bytes(&def_rss_key[0]);
+ rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
+ }
+
+ /* Configure RSS hash */
+ if (qede_rss_hash_update(eth_dev, &rss_conf))
+ return -EINVAL;
+
+ /* Configure default RETA */
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ id = i / RTE_RETA_GROUP_SIZE;
+ pos = i % RTE_RETA_GROUP_SIZE;
+ q = i % QEDE_RSS_COUNT(qdev);
+ reta_conf[id].reta[pos] = q;
+ }
+ if (qede_rss_reta_update(eth_dev, &reta_conf[0],
+ ECORE_RSS_IND_TABLE_SIZE))
+ return -EINVAL;
+
+ return 0;
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc, i, j;
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
@@ -684,14 +831,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
- DP_NOTICE(edev, false,
- "100G mode needs min. 2 RX/TX queues\n");
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
(eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
"100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
@@ -701,11 +847,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rxmode->enable_scatter == 1)
eth_dev->data->scattered_rx = 1;
- if (rxmode->enable_lro == 1) {
- DP_INFO(edev, "LRO is not supported\n");
- return -EINVAL;
- }
-
if (!rxmode->hw_strip_crc)
DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
@@ -713,6 +854,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
+ if (rxmode->enable_lro) {
+ qdev->enable_lro = true;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+
/* Check for the port restart case */
if (qdev->state != QEDE_DEV_INIT) {
rc = qdev->ops->vport_stop(edev, 0);
@@ -739,11 +887,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rc != 0)
return rc;
- SLIST_INIT(&qdev->vlan_list_head);
+ if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
+ rxmode->mq_mode == ETH_MQ_RX_NONE)) {
+ DP_ERR(edev, "Unsupported RSS mode\n");
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
- /* Add primary mac for PF */
- if (IS_PF(edev))
- qede_mac_addr_set(eth_dev, &qdev->primary_mac);
+ /* Flow director mode check */
+ rc = qede_check_fdir_support(eth_dev);
+ if (rc) {
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
+ SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+
+ SLIST_INIT(&qdev->vlan_list_head);
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
@@ -763,13 +924,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
.nb_max = NUM_RX_BDS_MAX,
.nb_min = 128,
- .nb_align = 128 /* lowest common multiple */
+ .nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
.nb_max = NUM_TX_BDS_MAX,
.nb_min = 256,
- .nb_align = 256
+ .nb_align = 256,
+ .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+ .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
static void
@@ -783,34 +946,44 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
- dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
- dev_info->max_tx_queues = dev_info->max_rx_queues;
- dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
- if (IS_VF(edev))
- dev_info->max_vfs = 0;
+
+ if (IS_PF(edev))
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
else
- dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
- dev_info->driver_name = qdev->drv_ver;
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ dev_info->max_tx_queues = dev_info->max_rx_queues;
+
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
+ dev_info->max_vfs = 0;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
+ dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = QEDE_TXQ_FLAGS,
};
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO);
+
+ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
@@ -876,10 +1049,12 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
+#endif
enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
@@ -891,10 +1066,12 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
+#endif
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
qed_configure_filter_rx_mode(eth_dev,
@@ -926,12 +1103,15 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
PMD_INIT_FUNC_TRACE(edev);
+ qede_fdir_dealloc_resc(eth_dev);
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
@@ -952,9 +1132,9 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qdev->ops->common->remove(edev);
- rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
if (edev->num_hwfns > 1)
@@ -1008,8 +1188,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
RTE_ETHDEV_QUEUE_STAT_CNTRS);
txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
- (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
DP_VERBOSE(edev, ECORE_MSG_DEBUG,
"Not all the queue stats will be displayed. Set"
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
@@ -1062,7 +1242,8 @@ qede_get_xstats_count(struct qede_dev *qdev) {
static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, unsigned limit)
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
@@ -1277,7 +1458,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
-void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
*rss_caps = 0;
*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
@@ -1286,87 +1467,182 @@ void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
-static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf)
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
{
- struct qed_update_vport_params vport_update_params;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params rss_params;
+ struct ecore_hwfn *p_hwfn;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
- int i;
+ uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
+ uint8_t i;
+ int rc;
memset(&vport_update_params, 0, sizeof(vport_update_params));
+ memset(&rss_params, 0, sizeof(rss_params));
+
+ DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
+ (unsigned long)hf, len, key);
if (hf != 0) {
- /* Enable RSS */
- qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
- if (key)
- memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
- vport_update_params.update_rss_flg = 1;
- qdev->rss_enabled = 1;
- } else {
- /* Disable RSS */
- qdev->rss_enabled = 0;
+ /* Enabling RSS */
+ DP_INFO(edev, "Enabling rss\n");
+
+ /* RSS caps */
+ qede_init_rss_caps(&rss_params.rss_caps, hf);
+ rss_params.update_rss_capabilities = 1;
+
+ /* RSS hash key */
+ if (key) {
+ if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
+ DP_ERR(edev, "RSS key length exceeds limit\n");
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Applying user supplied hash key\n");
+ rss_params.update_rss_key = 1;
+ memcpy(&rss_params.rss_key, key, len);
+ }
+ rss_params.rss_enable = 1;
}
- /* If the mapping doesn't fit any supported, return */
- if (qdev->rss_params.rss_caps == 0 && hf != 0)
- return -EINVAL;
-
- DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
- "Enabling RSS" : "Disabling RSS");
-
+ rss_params.update_rss_config = 1;
+ /* tbl_size has to be set with capabilities */
+ rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
+ /* pass the L2 handles instead of qids */
+ for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
+ idx = qdev->rss_ind_table[i];
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
+ vport_update_params.rss_params = &rss_params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ return rc;
+ }
+ }
+ qdev->rss_enable = rss_params.rss_enable;
+
+ /* Update local structure for hash query */
+ qdev->rss_conf.rss_hf = hf;
+ qdev->rss_conf.rss_key_len = len;
+ if (qdev->rss_enable) {
+ if (qdev->rss_conf.rss_key == NULL) {
+ qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
+ if (qdev->rss_conf.rss_key == NULL) {
+ DP_ERR(edev, "No memory to store RSS key\n");
+ return -ENOMEM;
+ }
+ }
+ if (key && len) {
+ DP_INFO(edev, "Storing RSS key\n");
+ memcpy(qdev->rss_conf.rss_key, key, len);
+ }
+ } else if (!qdev->rss_enable && len == 0) {
+ if (qdev->rss_conf.rss_key) {
+ free(qdev->rss_conf.rss_key);
+ qdev->rss_conf.rss_key = NULL;
+ DP_INFO(edev, "Free RSS key\n");
+ }
+ }
- return qdev->ops->vport_update(edev, &vport_update_params);
+ return 0;
}
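
From the application side this new ecore-based path is reached through the standard ethdev call. A hedged usage sketch, assuming the rte_ethdev API of this DPDK generation (uint8_t port id); the key bytes and hash fields are example values, and the key length must not exceed ECORE_RSS_KEY_SIZE * sizeof(uint32_t) (40 bytes) or the function above returns -EINVAL:

    #include <rte_ethdev.h>

    static int example_enable_rss(uint8_t port_id)
    {
            static uint8_t key[40];         /* 40 bytes = ECORE_RSS_KEY_SIZE * 4 */
            struct rte_eth_rss_conf rss_conf = {
                    .rss_key = key,         /* or NULL to let the PMD pick a random key */
                    .rss_key_len = sizeof(key),
                    .rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP |
                              ETH_RSS_NONFRAG_IPV4_UDP,
            };

            /* fill key[] with application-chosen bytes before this call */
            return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
    }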
-int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- uint64_t hf;
-
- if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
- return -EINVAL;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- if (rss_conf->rss_key)
- memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
- sizeof(qdev->rss_params.rss_key));
-
- hf = 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
- ETH_RSS_IPV4 : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
- ETH_RSS_IPV6 : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
- ETH_RSS_IPV6_EX : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
- ETH_RSS_NONFRAG_IPV4_TCP : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
- ETH_RSS_NONFRAG_IPV6_TCP : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
- ETH_RSS_IPV6_TCP_EX : 0;
-
- rss_conf->rss_hf = hf;
+ rss_conf->rss_hf = qdev->rss_conf.rss_hf;
+ rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
+ if (rss_conf->rss_key && qdev->rss_conf.rss_key)
+ memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
+ rss_conf->rss_key_len);
return 0;
}
-static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
+ struct ecore_rss_params *rss)
{
- struct qed_update_vport_params vport_update_params;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ int i, fn;
+ bool rss_mode = 1; /* enable */
+ struct ecore_queue_cid *cid;
+ struct ecore_rss_params *t_rss;
+
+ /* In a regular scenario we'd simply take the input handlers.
+ * But in CMT we have to split the handlers according to the
+ * engine they were configured on, and then determine whether
+ * RSS is really required, since two queues on CMT don't
+ * require RSS.
+ */
+
+ /* CMT should be round-robin */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ cid = rss->rss_ind_table[i];
+
+ if (cid->p_owner == ECORE_LEADING_HWFN(edev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
+ }
+
+ t_rss = &rss[1];
+ t_rss->update_rss_ind_table = 1;
+ t_rss->rss_table_size_log = 7;
+ t_rss->update_rss_config = 1;
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(edev, fn) {
+ for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
+ i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+
+ if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
+ DP_INFO(edev,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ rss_mode = 0;
+ goto out;
+ }
+ }
+
+out:
+ t_rss->rss_enable = rss_mode;
+
+ return rss_mode;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params *params;
+ struct ecore_hwfn *p_hwfn;
uint16_t i, idx, shift;
+ uint8_t entry;
+ int rc = 0;
if (reta_size > ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported by hardware\n",
@@ -1375,42 +1651,71 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
}
memset(&vport_update_params, 0, sizeof(vport_update_params));
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
+ params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
+ RTE_CACHE_LINE_SIZE);
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
- uint8_t entry = reta_conf[idx].reta[shift];
- qdev->rss_params.rss_ind_table[i] = entry;
+ entry = reta_conf[idx].reta[shift];
+ /* Pass rxq handles to ecore */
+ params->rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
}
}
- vport_update_params.update_rss_flg = 1;
+ params->update_rss_ind_table = 1;
+ params->rss_table_size_log = 7;
+ params->update_rss_config = 1;
+
+ /* Fix up RETA for CMT mode device */
+ if (edev->num_hwfns > 1)
+ qdev->rss_enable = qede_update_rss_parm_cmt(edev,
+ params);
vport_update_params.vport_id = 0;
+ /* Use the current value of rss_enable */
+ params->rss_enable = qdev->rss_enable;
+ vport_update_params.rss_params = params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ goto out;
+ }
+ }
- return qdev->ops->vport_update(edev, &vport_update_params);
+out:
+ rte_free(params);
+ return rc;
}
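
The reta_conf layout used above packs 64 entries per rte_eth_rss_reta_entry64, with a mask bit gating each slot: entry i lives at reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE]. A hedged application-side sketch that programs all 128 entries (the queue count is an example value; API names follow the rte_ethdev of this DPDK generation):

    #include <string.h>
    #include <rte_ethdev.h>

    static int example_program_reta(uint8_t port_id, uint16_t nb_rxq)
    {
            struct rte_eth_rss_reta_entry64 reta_conf[2];   /* 2 * 64 = 128 entries */
            uint16_t i;

            memset(reta_conf, 0, sizeof(reta_conf));
            for (i = 0; i < 128; i++) {
                    /* enable the slot and spread entries round-robin over queues */
                    reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
                            1ULL << (i % RTE_RETA_GROUP_SIZE);
                    reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
                            i % nb_rxq;
            }
            return rte_eth_dev_rss_reta_update(port_id, reta_conf, 128);
    }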
-int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
uint16_t i, idx, shift;
+ uint8_t entry;
if (reta_size > ETH_RSS_RETA_SIZE_128) {
- struct ecore_dev *edev = &qdev->edev;
DP_ERR(edev, "reta_size %d is not supported\n",
reta_size);
+ return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
- uint8_t entry = qdev->rss_params.rss_ind_table[i];
+ entry = qdev->rss_ind_table[i];
reta_conf[idx].reta[shift] = entry;
}
}
@@ -1418,34 +1723,339 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
-int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- uint32_t frame_size;
- struct qede_dev *qdev = dev->data->dev_private;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
+ struct qede_fastpath *fp;
+ uint32_t frame_size;
+ uint16_t rx_buf_size;
+ uint16_t bufsz;
+ int i;
+ PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
-
- /* VLAN_TAG = 4 */
- frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ frame_size = mtu + QEDE_ETH_OVERHEAD;
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ DP_ERR(edev, "MTU %u out of range\n", mtu);
return -EINVAL;
-
+ }
if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+ dev->data->min_rx_buf_size);
return -EINVAL;
-
+ }
+ /* Temporarily replace I/O functions with dummy ones. They cannot
+ * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ qede_dev_stop(dev);
+ rte_delay_ms(1000);
+ qdev->mtu = mtu;
+ /* Fix up RX buf size for all queues of the port */
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ }
+ }
+ qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- qdev->mtu = mtu;
- qede_dev_stop(dev);
- qede_dev_start(dev);
+ /* Reassign back */
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+
+ return 0;
+}
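
The MTU path now quiesces the port by itself: the Rx/Tx burst handlers are swapped for qede_rxtx_pkts_dummy, the port is stopped, every Rx queue's rx_buf_size is recomputed, and the port is restarted before the real handlers are restored. From the application it remains a single ethdev call, roughly as below (a hedged sketch; the port id and 9000-byte MTU are example values):

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void example_set_jumbo_mtu(uint8_t port_id)
    {
            /* Traffic pauses while the PMD stops/starts the port internally;
             * jumbo_frame and max_rx_pkt_len are updated from the new MTU. */
            int ret = rte_eth_dev_set_mtu(port_id, 9000);

            if (ret != 0)
                    printf("set_mtu on port %u failed: %d\n", port_id, ret);
    }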
+
+static int
+qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ struct ecore_hwfn *p_hwfn;
+ int rc, i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+ if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
+ QEDE_VXLAN_DEF_PORT;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
+}
+
+static int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+}
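
These two callbacks back the generic rte_eth_dev_udp_tunnel_port_add()/_del() API; on delete the hardware VXLAN port falls back to QEDE_VXLAN_DEF_PORT rather than being cleared. A hedged usage sketch (the UDP port value is an example; API and struct names follow the rte_ethdev of this DPDK generation):

    #include <rte_ethdev.h>

    static void example_set_vxlan_port(uint8_t port_id)
    {
            struct rte_eth_udp_tunnel tunnel = {
                    .udp_port = 4790,                    /* example non-default port */
                    .prot_type = RTE_TUNNEL_TYPE_VXLAN,
            };

            /* program the outer UDP destination port used for VXLAN parsing */
            rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
            /* ... and later revert to the default VXLAN port */
            rte_eth_dev_udp_tunnel_port_del(port_id, &tunnel);
    }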
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+ /* Init common ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn;
+ struct ecore_hwfn *p_hwfn;
+ enum ecore_filter_ucast_type type;
+ enum ecore_tunn_clss clss;
+ struct ecore_filter_ucast ucast;
+ char str[80];
+ uint16_t filter_type;
+ int rc, i;
+
+ filter_type = conf->filter_type | qdev->vxlan_filter_type;
+ /* First determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Wrong filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ucast.opcode = ECORE_FILTER_ADD;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 1);
+ if (rc == 0) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 1);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+
+ DP_INFO(edev, "Enabling VXLAN tunneling\n");
+ qede_set_cmn_tunn_param(&tunn, clss, true, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+ }
+ qdev->num_tunn_filters++; /* Filter added successfully */
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ucast.opcode = ECORE_FILTER_REMOVE;
+
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 0);
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 0);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+ qdev->num_tunn_filters--;
+
+ /* Disable VXLAN if VXLAN filters become 0 */
+ if (qdev->num_tunn_filters == 0) {
+ DP_INFO(edev, "Disabling VXLAN tunneling\n");
+
+ /* Use 0 as tunnel mode */
+ qede_set_cmn_tunn_param(&tunn, clss, false, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
+
+ return 0;
+}
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with VXLAN tunneling");
+ return(qede_vxlan_tunn_config(eth_dev, filter_op,
+ filter_conf));
+ /* Placeholders for future tunneling support */
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
return 0;
}
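
qede_dev_filter_ctrl() is the new entry point behind rte_eth_dev_filter_ctrl(): VXLAN tunnel filters go through qede_vxlan_tunn_config() above, while flow-director and ntuple requests are forwarded to the fdir code added elsewhere in this series. A hedged application-side sketch for requesting the outer-mac + VNI classification listed in the table at the top of the file (field and API names follow rte_eth_ctrl.h / rte_ethdev.h of this DPDK generation; the VNI is an example value):

    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static int example_add_vxlan_filter(uint8_t port_id,
                                        const struct ether_addr *omac)
    {
            struct rte_eth_tunnel_filter_conf conf;

            memset(&conf, 0, sizeof(conf));
            conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN;
            conf.filter_type = ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID;
            conf.tenant_id = 100;                       /* example VNI */
            ether_addr_copy(omac, &conf.outer_mac);
            /* queue_id is left 0: steering to a specific Rx queue is not
             * supported with VXLAN tunneling on this PMD */

            return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
                                           RTE_ETH_FILTER_ADD, &conf);
    }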
@@ -1485,6 +2095,9 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
+ .filter_ctrl = qede_dev_filter_ctrl,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
@@ -1522,9 +2135,10 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
static void qede_update_pf_params(struct ecore_dev *edev)
{
struct ecore_pf_params pf_params;
- /* 32 rx + 32 tx */
+
memset(&pf_params, 0, sizeof(struct ecore_pf_params));
- pf_params.eth_pf_params.num_cons = 64;
+ pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
qed_ops->common->update_pf_params(edev, &pf_params);
}
@@ -1544,13 +2158,13 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
- uint32_t max_mac_addrs;
int rc;
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_addr = eth_dev->pci_dev->addr;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -1560,6 +2174,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->rx_pkt_burst = qede_recv_pkts;
eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DP_NOTICE(edev, false,
@@ -1567,8 +2182,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
/* @DPDK */
@@ -1593,10 +2206,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_update_pf_params(edev);
- rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
}
@@ -1646,20 +2259,20 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_alloc_etherdev(adapter, &dev_info);
- adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
+ adapter->ops->common->set_name(edev, edev->name);
if (!is_vf)
- adapter->dev_info.num_mac_addrs =
+ adapter->dev_info.num_mac_filters =
(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
ECORE_MAC);
else
ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
- &adapter->dev_info.num_mac_addrs);
+ (uint32_t *)&adapter->dev_info.num_mac_filters);
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
(ETHER_ADDR_LEN *
- adapter->dev_info.num_mac_addrs),
+ adapter->dev_info.num_mac_filters),
RTE_CACHE_LINE_SIZE);
if (eth_dev->data->mac_addrs == NULL) {
@@ -1702,7 +2315,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
if (do_once) {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
qede_print_adapter_info(adapter);
+#endif
do_once = false;
}
@@ -1760,64 +2375,96 @@ static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return qede_dev_common_uninit(eth_dev);
}
-static struct rte_pci_id pci_id_qedevf_map[] = {
+static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
- QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
},
{
- QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
},
{.vendor_id = 0,}
};
-static struct rte_pci_id pci_id_qede_map[] = {
+static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
},
{.vendor_id = 0,}
};
-static struct eth_driver rte_qedevf_pmd = {
- .pci_drv = {
- .id_table = pci_id_qedevf_map,
- .drv_flags =
- RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = qedevf_eth_dev_init,
- .eth_dev_uninit = qedevf_eth_dev_uninit,
- .dev_private_size = sizeof(struct qede_dev),
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+ .id_table = pci_id_qedevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qedevf_eth_dev_pci_probe,
+ .remove = qedevf_eth_dev_pci_remove,
};
-static struct eth_driver rte_qede_pmd = {
- .pci_drv = {
- .id_table = pci_id_qede_map,
- .drv_flags =
- RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = qede_eth_dev_init,
- .eth_dev_uninit = qede_eth_dev_uninit,
- .dev_private_size = sizeof(struct qede_dev),
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+ .id_table = pci_id_qede_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qede_eth_dev_pci_probe,
+ .remove = qede_eth_dev_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
-RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");