author    Ido Barnea <ibarnea@cisco.com>    2016-08-08 13:18:36 +0300
committer Ido Barnea <ibarnea@cisco.com>    2016-08-08 13:18:36 +0300
commit    2d51b632b8b22b6e5be9ed1cbc110bfd4308c2e6 (patch)
tree      8205d26940b42d0a19c87be5598b43bf059c56df
parent    dc56569b146966c17433fef70d4eb5456871dcc6 (diff)
Changing DPDK latency patch, so we can easily merge it for next DPDK version
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_ethdev.c  41
-rw-r--r--  src/dpdk/drivers/net/i40e/i40e_rxtx.c    20
2 files changed, 21 insertions, 40 deletions
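
The patch replaces the old TREX_PATCH / LOW_LATENCY_WORKAROUND pair with a single TREX_PATCH_LOW_LATENCY macro and keeps the unmodified DPDK code on the #else branches, so the local changes can be located and re-applied when rebasing onto the next DPDK release. A minimal sketch of that gating pattern, assuming nothing beyond standard C (select_tx_queue_tc and its parameters are illustrative, not driver code; the last-queue rule mirrors the i40e_rxtx.c hunk below):

#define TREX_PATCH_LOW_LATENCY

/*
 * Pick the DCB traffic class for a TX queue. The upstream DPDK behavior
 * stays on the #else path, so rebasing onto a newer DPDK only means
 * re-applying the blocks guarded by the single macro above.
 */
static int select_tx_queue_tc(int queue_idx, int nb_tx_queues)
{
#ifdef TREX_PATCH_LOW_LATENCY
    /* TRex reserves the last TX queue for low-latency traffic (TC 1). */
    return (queue_idx == nb_tx_queues - 1) ? 1 : 0;
#else
    /* Default: every queue stays on traffic class 0. */
    (void)queue_idx;
    (void)nb_tx_queues;
    return 0;
#endif
}
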
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
index d9d2b969..12402ae2 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.c
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -756,6 +756,7 @@ static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
#define TREX_PATCH
+#define TREX_PATCH_LOW_LATENCY
#ifdef TREX_PATCH
// 0 - statfull mode. 1 stateless.
@@ -2373,19 +2374,6 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_update_vsi_stats(pf->main_vsi);
}
-// TREX_PATCH
-int
-i40e_trex_get_speed(struct rte_eth_dev *dev)
-{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (i40e_is_40G_device(hw->device_id)) {
- return 40;
- } else {
- return 10;
- }
-}
-
//TREX_PATCH
// fill stats array with fdir rules match count statistics
// Notice that we read statistics from start to start + len, but we fill the stats are
@@ -4174,10 +4162,18 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
/* create floating veb if vsi is NULL */
if (vsi != NULL) {
ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
- vsi->enabled_tc, false,
+#ifdef TREX_PATCH_LOW_LATENCY
+ vsi->enabled_tc, false,
+#else
+ I40E_DEFAULT_TCMAP, false,
+#endif
&veb->seid, false, NULL);
} else {
+#ifdef TREX_PATCH_LOW_LATENCY
ret = i40e_aq_add_veb(hw, 0, 0, vsi->enabled_tc,
+#else
+ ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP,
+#endif
true, &veb->seid, false, NULL);
}
@@ -4331,9 +4327,7 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return i40e_vsi_add_mac(vsi, &filter);
}
-#ifdef TREX_PATCH
-#define LOW_LATENCY_WORKAROUND
-#ifdef LOW_LATENCY_WORKAROUND
+#ifdef TREX_PATCH_LOW_LATENCY
static int
i40e_vsi_update_tc_max_bw(struct i40e_vsi *vsi, u16 credit){
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
@@ -4382,9 +4376,6 @@ i40e_vsi_update_tc_bandwidth_ex(struct i40e_vsi *vsi)
return I40E_SUCCESS;
}
-
-
-#endif
#endif
/*
@@ -9342,15 +9333,11 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->local_dcbx_config.etscfg.maxtcs = 0;
hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
-
-#ifdef LOW_LATENCY_WORKAROUND
-
+#ifdef TREX_PATCH_LOW_LATENCY
hw->local_dcbx_config.etscfg.tcbwtable[1] = 0;
hw->local_dcbx_config.etscfg.tsatable[1] = I40E_IEEE_TSA_STRICT;
hw->local_dcbx_config.etscfg.prioritytable[1] = 1;
#endif
-
-
hw->local_dcbx_config.etsrec =
hw->local_dcbx_config.etscfg;
hw->local_dcbx_config.pfc.willing = 0;
@@ -9370,15 +9357,13 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->aq.asq_last_status);
return -ENOSYS;
}
-
-#ifdef LOW_LATENCY_WORKAROUND
+#ifdef TREX_PATCH_LOW_LATENCY
if (i40e_vsi_update_tc_bandwidth_ex(pf->main_vsi) !=
I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to update TC bandwidth");
return -ENOSYS;
}
#endif
-
} else {
PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d,"
" aq_err = %d.", ret,
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
index 0c191068..8fdf30c6 100644
--- a/src/dpdk/drivers/net/i40e/i40e_rxtx.c
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
@@ -1930,10 +1930,6 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
-// TREX_PATCH
-// Based on i40e_pf_get_vsi_by_qindex. Return low latency VSI one queue.
-#define LOW_LATENCY_WORKAROUND
-
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -2276,10 +2272,10 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
ad->rx_bulk_alloc_allowed = false;
}
- #ifdef LOW_LATENCY_WORKAROUND
+#define TREX_PATCH_LOW_LATENCY
+#ifdef TREX_PATCH_LOW_LATENCY
rxq->dcb_tc =0;
-
- #else
+#else
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->enabled_tc & (1 << i)))
@@ -2293,7 +2289,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
if (queue_idx >= base && queue_idx < (base + BIT(bsf)))
rxq->dcb_tc = i;
}
- #endif
+#endif
return 0;
}
@@ -2388,7 +2384,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
uint16_t i, base, bsf, tc_mapping;
- u8 low_latency=0;
if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
struct i40e_vf *vf =
@@ -2398,9 +2393,10 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
}
-#ifdef LOW_LATENCY_WORKAROUND
+#ifdef TREX_PATCH_LOW_LATENCY
+ u8 low_latency = 0;
if (queue_idx == pf->dev_data->nb_tx_queues-1) {
- low_latency= 1;
+ low_latency = 1;
}
#endif
@@ -2558,7 +2554,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
/* Use a simple TX queue without offloads or multi segs if possible */
i40e_set_tx_function_flag(dev, txq);
-#ifdef LOW_LATENCY_WORKAROUND
+#ifdef TREX_PATCH_LOW_LATENCY
if (low_latency) {
txq->dcb_tc=1;
}else{