path: root/src
author    Hanoh Haim <hhaim@cisco.com>    2016-08-03 14:58:07 +0300
committer Hanoh Haim <hhaim@cisco.com>    2016-08-03 14:58:07 +0300
commit    bcc2ca1a462ac65dec74e65c81e633e4f30d7fc1 (patch)
tree      8b0e84d27ad304bd48f129b75583d176f55abeac /src
parent    88b9d2dd713da99e35a20e20008484017c6ec907 (diff)
Another latency improvement - see trex-214. Remove the old workaround and add a TX QSet with strict priority.
Diffstat (limited to 'src')
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev.c | 88
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx.c   | 51
2 files changed, 73 insertions(+), 66 deletions(-)
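
The commit message above summarizes the new scheme: instead of capping the main VSI's bandwidth and carrying the latency traffic on a separate VSI, the patch enables a second traffic class on the main VSI, marks it strict priority, and steers the last TX queue (the latency queue) into it. A minimal standalone sketch of that queue-to-TC mapping follows, using the TC numbers from the hunks below; the queue count is an arbitrary example, not taken from this commit.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: mirrors the mapping the patch applies in
 * i40e_dev_tx_queue_setup() -- the last TX queue (the latency queue)
 * lands on TC1, which the DCBX config marks strict priority, while all
 * other queues stay on TC0. */
static uint8_t tx_queue_to_tc(uint16_t queue_idx, uint16_t nb_tx_queues)
{
    return (queue_idx == nb_tx_queues - 1) ? 1 : 0;
}

int main(void)
{
    const uint16_t nb_tx_queues = 5;   /* example value only */
    for (uint16_t q = 0; q < nb_tx_queues; q++)
        printf("tx queue %u -> TC%u\n",
               (unsigned)q, (unsigned)tx_queue_to_tc(q, nb_tx_queues));
    return 0;
}
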
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
index 2ee9326e..be85794d 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.c
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -4168,10 +4168,10 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
/* create floating veb if vsi is NULL */
if (vsi != NULL) {
ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
- I40E_DEFAULT_TCMAP, false,
+ vsi->enabled_tc, false,
&veb->seid, false, NULL);
} else {
- ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
+ ret = i40e_aq_add_veb(hw, 0, 0, vsi->enabled_tc,
true, &veb->seid, false, NULL);
}
@@ -4345,6 +4345,37 @@ i40e_vsi_update_tc_max_bw(struct i40e_vsi *vsi, u16 credit){
}
return (0);
}
+
+static int
+i40e_vsi_update_tc_bandwidth_ex(struct i40e_vsi *vsi)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int i, ret;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw_data;
+
+ if (!vsi->seid) {
+ PMD_DRV_LOG(ERR, "seid not valid");
+ return -EINVAL;
+ }
+
+ memset(&tc_bw_data, 0, sizeof(tc_bw_data));
+ tc_bw_data.tc_valid_bits = 3;
+ tc_bw_data.tc_bw_credits[0]=1;
+ tc_bw_data.tc_bw_credits[1]=127;
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw_data, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure TC BW");
+ return ret;
+ }
+ vsi->enabled_tc=3;
+
+ (void)rte_memcpy(vsi->info.qs_handle, tc_bw_data.qs_handles,
+ sizeof(vsi->info.qs_handle));
+ return I40E_SUCCESS;
+}
+
+
#endif
#endif
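
For reference, the new i40e_vsi_update_tc_bandwidth_ex() above enables two traffic classes on the main VSI and hands the admin queue their relative ETS credits. The stand-in below only restates those values with the bit meanings spelled out; the struct is illustrative and is not the driver's real i40e_aqc_configure_vsi_tc_bw_data layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the TC bandwidth request filled in above. */
struct tc_bw_sketch {
    uint8_t tc_valid_bits;     /* bitmap of enabled TCs: bit 0 = TC0, bit 1 = TC1 */
    uint8_t tc_bw_credits[8];  /* relative ETS share per TC */
};

static void build_low_latency_tc_bw(struct tc_bw_sketch *req)
{
    memset(req, 0, sizeof(*req));
    req->tc_valid_bits = 0x3;      /* TC0 (bulk) and TC1 (latency) enabled */
    req->tc_bw_credits[0] = 1;     /* bulk class */
    req->tc_bw_credits[1] = 127;   /* latency class; additionally made strict
                                    * priority in the DCBX hunk further down */
}

int main(void)
{
    struct tc_bw_sketch req;
    build_low_latency_tc_bw(&req);
    printf("tc_valid_bits=0x%x credits: TC0=%u TC1=%u\n",
           (unsigned)req.tc_valid_bits,
           (unsigned)req.tc_bw_credits[0], (unsigned)req.tc_bw_credits[1]);
    return 0;
}
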
@@ -5033,38 +5064,6 @@ i40e_pf_setup(struct i40e_pf *pf)
}
pf->main_vsi = vsi;
-#ifdef TREX_PATCH
-#ifdef LOW_LATENCY_WORKAROUND
- /*
- Workaround for the low latency issue.
- It seems RR does not work as expected, both within the same QSet and across different QSets.
- The quanta can be very high, which creates very high latency, especially with long packets (9K).
- This workaround limits the main (bulk) VSI to 99% of the BW and thereby supports low latency (suggested by Intel).
- ETS with strict priority and 127 credits does not work.
- */
-
- if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_10GB) {
- i40e_vsi_update_tc_max_bw(vsi,199);
- }else{
- if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_40GB) {
- i40e_vsi_update_tc_max_bw(vsi,799);
- }else{
- PMD_DRV_LOG(ERR, "Unknown phy speed %d",hw->phy.link_info.link_speed);
- }
- }
-
- /* add for low latency a new VSI for Queue set */
- vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi, 0);
- if (!vsi) {
- PMD_DRV_LOG(ERR, "Setup of low latency vsi failed");
- return I40E_ERR_NOT_READY;
- }
-
- pf->ll_vsi = vsi;
-
-#endif
-#endif
-
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
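
The removed workaround above capped the main VSI with max-BW credit values of 199 (10G) and 799 (40G). Assuming one credit corresponds to 50 Mbps (an assumption about the admin-queue granularity, not stated in the diff), those values work out to roughly the "99% of the BW" mentioned in the deleted comment:

#include <stdio.h>

/* Back-of-the-envelope check of the old workaround's credit values,
 * assuming 50 Mbps per max-BW credit (assumption, not from the diff). */
int main(void)
{
    printf("10G: 199 credits -> %d Mbps (%.2f%% of 10000)\n",
           199 * 50, 199 * 50 * 100.0 / 10000);
    printf("40G: 799 credits -> %d Mbps (%.2f%% of 40000)\n",
           799 * 50, 799 * 50 * 100.0 / 40000);
    return 0;
}
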
@@ -9334,8 +9333,16 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->local_dcbx_config.etscfg.willing = 0;
hw->local_dcbx_config.etscfg.maxtcs = 0;
hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
- hw->local_dcbx_config.etscfg.tsatable[0] =
- I40E_IEEE_TSA_ETS;
+ hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+
+#ifdef LOW_LATENCY_WORKAROUND
+
+ hw->local_dcbx_config.etscfg.tcbwtable[1] = 0;
+ hw->local_dcbx_config.etscfg.tsatable[1] = I40E_IEEE_TSA_STRICT;
+ hw->local_dcbx_config.etscfg.prioritytable[1] = 1;
+#endif
+
+
hw->local_dcbx_config.etsrec =
hw->local_dcbx_config.etscfg;
hw->local_dcbx_config.pfc.willing = 0;
@@ -9355,6 +9362,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->aq.asq_last_status);
return -ENOSYS;
}
+
+#ifdef LOW_LATENCY_WORKAROUND
+ if (i40e_vsi_update_tc_bandwidth_ex(pf->main_vsi) !=
+ I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
+ return -ENOSYS;
+ }
+#endif
+
} else {
PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d,"
" aq_err = %d.", ret,
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
index 84c7b2dc..0c191068 100644
--- a/src/dpdk/drivers/net/i40e/i40e_rxtx.c
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
@@ -1933,31 +1933,6 @@ i40e_xmit_pkts_simple(void *tx_queue,
// TREX_PATCH
// Based on i40e_pf_get_vsi_by_qindex. Returns the low latency VSI for the last queue.
#define LOW_LATENCY_WORKAROUND
-#ifdef LOW_LATENCY_WORKAROUND
-static struct i40e_vsi*
-i40e_pf_tx_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
-{
- // For last queue index, return low latency VSI
- if (queue_idx == pf->dev_data->nb_tx_queues-1) {
- return pf->ll_vsi;
- }
-
- /* the queue in MAIN VSI range */
- if (queue_idx < pf->dev_data->nb_tx_queues)
- return pf->main_vsi;
-
-
- queue_idx -= pf->main_vsi->nb_qps;
-
- /* queue_idx is greater than VMDQ VSIs range */
- if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
- PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
- return NULL;
- }
-
- return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
-}
-#endif
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
@@ -2301,6 +2276,11 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
ad->rx_bulk_alloc_allowed = false;
}
+ #ifdef LOW_LATENCY_WORKAROUND
+ rxq->dcb_tc =0;
+
+ #else
+
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
continue;
@@ -2313,6 +2293,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
rxq->dcb_tc = i;
}
+ #endif
return 0;
}
@@ -2407,19 +2388,22 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
+ u8 low_latency=0;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
} else {
-#ifdef LOW_LATENCY_WORKAROUND
- vsi = i40e_pf_tx_get_vsi_by_qindex(pf, queue_idx);
-#else
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
-#endif
}
+#ifdef LOW_LATENCY_WORKAROUND
+ if (queue_idx == pf->dev_data->nb_tx_queues-1) {
+ low_latency= 1;
+ }
+#endif
+
if (vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
"exceeds the maximum", queue_idx);
@@ -2574,6 +2558,13 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Use a simple TX queue without offloads or multi segs if possible */
i40e_set_tx_function_flag(dev, txq);
+#ifdef LOW_LATENCY_WORKAROUND
+ if (low_latency) {
+ txq->dcb_tc=1;
+ }else{
+ txq->dcb_tc=0;
+ }
+#else
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
continue;
@@ -2586,7 +2577,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
txq->dcb_tc = i;
}
-
+#endif
return 0;
}