summaryrefslogtreecommitdiffstats
path: root/src/dpdk
diff options
context:
space:
mode:
authorIdo Barnea <ibarnea@cisco.com>2016-07-19 10:12:20 +0300
committerIdo Barnea <ibarnea@cisco.com>2016-07-24 14:01:11 +0300
commit8b0119ed99f893106a560ccb9db31c99e1d293eb (patch)
tree7e3eb6ef214af20fa975f28779a28479bfd73d5a /src/dpdk
parent3c106ce73a4a54863ed1c3df47c09eb1e63fadaf (diff)
dpdk0716 move:DPDK file patches
Diffstat (limited to 'src/dpdk')
-rw-r--r--src/dpdk/drivers/net/i40e/i40e_ethdev.c185
-rw-r--r--src/dpdk/drivers/net/i40e/i40e_ethdev.h1
-rw-r--r--src/dpdk/drivers/net/i40e/i40e_fdir.c26
-rw-r--r--src/dpdk/drivers/net/i40e/i40e_rxtx.c37
-rw-r--r--src/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h2
-rw-r--r--src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c18
-rw-r--r--src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c3
-rw-r--r--src/dpdk/lib/librte_ether/rte_eth_ctrl.h5
-rw-r--r--src/dpdk/lib/librte_ether/rte_ethdev.c34
-rw-r--r--src/dpdk/lib/librte_ether/rte_ethdev.h2
-rw-r--r--src/dpdk/lib/librte_mbuf/rte_mbuf.h5
11 files changed, 302 insertions, 16 deletions
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
index 3f9f05ee..94b8cb7d 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.c
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -755,6 +755,62 @@ static inline void i40e_flex_payload_reg_init(struct i40e_hw *hw)
#define I40E_FLOW_CONTROL_ETHERTYPE 0x8808
+#define TREX_PATCH
+#ifdef TREX_PATCH
+
+// 0 - stateful mode. 1 - stateless.
+static int trex_mode=0;
+void i40e_set_trex_mode(int mode) {
+ trex_mode = mode;
+}
+
+static void i40e_dump_filter_regs(struct i40e_hw *hw)
+{
+ int reg_nums[] = {31, 33, 34, 35, 41, 43};
+ int i;
+ uint32_t reg;
+
+ for (i =0; i < sizeof (reg_nums)/sizeof(int); i++) {
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(reg_nums[i], 0));
+ printf("I40E_PRTQF_FD_INSET(%d, 0): 0x%08x\n", reg_nums[i], reg);
+ reg = I40E_READ_REG(hw,I40E_PRTQF_FD_INSET(reg_nums[i], 1));
+ printf("I40E_PRTQF_FD_INSET(%d, 1): 0x%08x\n", reg_nums[i], reg);
+ }
+}
+
+static inline void i40e_filter_fields_reg_init(struct i40e_hw *hw)
+{
+ uint32_t reg;
+
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(12), 0x00000062);
+ I40E_WRITE_REG(hw, I40E_GLQF_PIT(2), 0x000024A0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 1), 0x00080000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 1), 0x00080000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 1), 0x00040000);
+ // filter IP according to ttl and L4 protocol
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 0), 0);
+ if (trex_mode == 1) {
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 1), 0x00100000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 1), 0x00100000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 1), 0x00100000);
+ } else {
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 1), 0x00040000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 1), 0x00040000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 1), 0x00040000);
+ }
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 1), 0x00080000);
+ I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0, 34), 0x000DFF00);
+ I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0,44), 0x000C00FF);
+ I40E_WRITE_FLUSH(hw);
+}
+#endif //TREX_PATCH
+
/*
* Add a ethertype filter to drop all flow control frames transmitted
* from VSIs.
@@ -1006,10 +1062,14 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
* for flexible payload by software.
* It should be removed once issues are fixed in NVM.
*/
+#ifdef TREX_PATCH
+ i40e_filter_fields_reg_init(hw);
+#else
i40e_flex_payload_reg_init(hw);
/* Initialize the input set for filters (hash and fd) to default value */
i40e_filter_input_set_init(pf);
+#endif
/* Initialize the parameters for adminq */
i40e_init_adminq_parameter(hw);
@@ -2293,9 +2353,11 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
I40E_GLPRT_PTC9522L(hw->port),
pf->offset_loaded, &os->tx_size_big,
&ns->tx_size_big);
+#ifndef TREX_PATCH
i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index),
pf->offset_loaded,
&os->fd_sb_match, &ns->fd_sb_match);
+#endif
/* GLPRT_MSPDC not supported */
/* GLPRT_XEC not supported */
@@ -2305,6 +2367,49 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw)
i40e_update_vsi_stats(pf->main_vsi);
}
+// TREX_PATCH
+int
+i40e_trex_get_speed(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (i40e_is_40G_device(hw->device_id)) {
+ return 40;
+ } else {
+ return 10;
+ }
+}
+
+//TREX_PATCH
+// fill stats array with fdir rules match count statistics
+// Notice that we read statistics from start to start + len, but we fill the stats array
+// starting from 0 with len values
+void
+i40e_trex_fdir_stats_get(struct rte_eth_dev *dev, uint32_t *stats, uint32_t start, uint32_t len)
+{
+ int i;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < len; i++) {
+ stats[i] = I40E_READ_REG(hw, I40E_GLQF_PCNT(i + start));
+ }
+}
+
+// TREX_PATCH
+void
+i40e_trex_fdir_stats_reset(struct rte_eth_dev *dev, uint32_t *stats, uint32_t start, uint32_t len)
+{
+ int i;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ for (i = 0; i < len; i++) {
+ if (stats) {
+ stats[i] = I40E_READ_REG(hw, I40E_GLQF_PCNT(i + start));
+ }
+ I40E_WRITE_REG(hw, I40E_GLQF_PCNT(i + start), 0xffffffff);
+ }
+}
+
/* Get all statistics of a port */
static void
i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
@@ -2321,10 +2426,17 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
pf->main_vsi->eth_stats.rx_multicast +
pf->main_vsi->eth_stats.rx_broadcast -
pf->main_vsi->eth_stats.rx_discards;
+#ifndef TREX_PATCH
stats->opackets = pf->main_vsi->eth_stats.tx_unicast +
pf->main_vsi->eth_stats.tx_multicast +
pf->main_vsi->eth_stats.tx_broadcast;
stats->ibytes = ns->eth.rx_bytes;
+#else
+ /* Hanoch: move to global transmit and not pf->vsi and we have two, high and low priority */
+ stats->opackets = ns->eth.tx_unicast +ns->eth.tx_multicast +ns->eth.tx_broadcast;
+ stats->ibytes = pf->main_vsi->eth_stats.rx_bytes;
+#endif
+
stats->obytes = ns->eth.tx_bytes;
stats->oerrors = ns->eth.tx_errors +
pf->main_vsi->eth_stats.tx_errors;
@@ -2628,10 +2740,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
if (i40e_is_40G_device(hw->device_id))
/* For XL710 */
- dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
+ dev_info->speed_capa = ETH_LINK_SPEED_40G;
else
/* For X710 */
- dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
}
static int
@@ -4214,6 +4326,29 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return i40e_vsi_add_mac(vsi, &filter);
}
+#ifdef TREX_PATCH
+#define LOW_LATENCY_WORKAROUND
+#ifdef LOW_LATENCY_WORKAROUND
+static int
+i40e_vsi_update_tc_max_bw(struct i40e_vsi *vsi, u16 credit){
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ int ret;
+
+ if (!vsi->seid) {
+ PMD_DRV_LOG(ERR, "seid not valid");
+ return -EINVAL;
+ }
+
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, credit,0, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to configure TC BW");
+ return ret;
+ }
+ return (0);
+}
+#endif
+#endif
+
/*
* i40e_vsi_get_bw_config - Query VSI BW Information
* @vsi: the VSI to be queried
@@ -4289,7 +4424,8 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
/* Use the FW API if FW >= v5.0 */
if (hw->aq.fw_maj_ver < 5) {
- PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ //TREX_PATCH - changed from ERR to INFO. Most of our customers do not have latest FW
+ PMD_INIT_LOG(INFO, "FW < v5.0, cannot enable loopback");
return;
}
@@ -4897,6 +5033,38 @@ i40e_pf_setup(struct i40e_pf *pf)
}
pf->main_vsi = vsi;
+#ifdef TREX_PATCH
+#ifdef LOW_LATENCY_WORKAROUND
+ /*
+ Workaround for low latency issue.
+ It seems RR does not work as expected both from same QSet and from different QSet
+ Quanta could be very high and this creates very high latency, especially with long packet size (9K)
+ This is a workaround limit the main (bulk) VSI to 99% of the BW and by that support low latency (suggested by Intel)
+ ETS with strict priority and 127 credit does not work.
+ */
+
+ if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_10GB) {
+ i40e_vsi_update_tc_max_bw(vsi,199);
+ }else{
+ if (hw->phy.link_info.link_speed == I40E_LINK_SPEED_40GB) {
+ i40e_vsi_update_tc_max_bw(vsi,799);
+ }else{
+ PMD_DRV_LOG(ERR, "Unknown phy speed %d",hw->phy.link_info.link_speed);
+ }
+ }
+
+ /* add for low latency a new VSI for Queue set */
+ vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi, 0);
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Setup of low latency vsi failed");
+ return I40E_ERR_NOT_READY;
+ }
+
+ pf->ll_vsi = vsi;
+
+#endif
+#endif
+
/* Configure filter control */
memset(&settings, 0, sizeof(settings));
if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_128)
@@ -9131,6 +9299,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
*
* Returns 0 on success, negative value on failure
*/
+//TREX_PATCH - changed all ERR to INFO in below func
static int
i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
@@ -9139,7 +9308,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
int ret = 0;
if ((pf->flags & I40E_FLAG_DCB) == 0) {
- PMD_INIT_LOG(ERR, "HW doesn't support DCB");
+ PMD_INIT_LOG(INFO, "HW doesn't support DCB");
return -ENOTSUP;
}
@@ -9181,13 +9350,13 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
I40E_APP_PROTOID_FCOE;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR, "default dcb config fails."
+ PMD_INIT_LOG(INFO, "default dcb config fails."
" err = %d, aq_err = %d.", ret,
hw->aq.asq_last_status);
return -ENOSYS;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
+ PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d,"
" aq_err = %d.", ret,
hw->aq.asq_last_status);
return -ENOTSUP;
@@ -9200,12 +9369,12 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
ret = i40e_init_dcb(hw);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- PMD_INIT_LOG(ERR, "HW doesn't support"
+ PMD_INIT_LOG(INFO, "HW doesn't support"
" DCBX offload.");
return -ENOTSUP;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
+ PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d,"
" aq_err = %d.", ret,
hw->aq.asq_last_status);
return -ENOTSUP;
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.h b/src/dpdk/drivers/net/i40e/i40e_ethdev.h
index 92c8fad0..a5d97aac 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.h
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.h
@@ -413,6 +413,7 @@ TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
struct i40e_pf {
struct i40e_adapter *adapter; /* The adapter this PF associate to */
struct i40e_vsi *main_vsi; /* pointer to main VSI structure */
+ struct i40e_vsi * ll_vsi; // TREX_PATCH
uint16_t mac_seid; /* The seid of the MAC of this PF */
uint16_t main_vsi_seid; /* The seid of the main VSI */
uint16_t max_num_vsi;
diff --git a/src/dpdk/drivers/net/i40e/i40e_fdir.c b/src/dpdk/drivers/net/i40e/i40e_fdir.c
index f65c4110..4f9a6b48 100644
--- a/src/dpdk/drivers/net/i40e/i40e_fdir.c
+++ b/src/dpdk/drivers/net/i40e/i40e_fdir.c
@@ -74,8 +74,11 @@
#define I40E_FDIR_UDP_DEFAULT_LEN 400
/* Wait count and interval for fdir filter programming */
-#define I40E_FDIR_WAIT_COUNT 10
-#define I40E_FDIR_WAIT_INTERVAL_US 1000
+#define TREX_PATCH
+// TREX_PATCH - Values were 10 and 1000. These numbers give much better performance when
+// configuring a large amount of rules
+#define I40E_FDIR_WAIT_COUNT 100
+#define I40E_FDIR_WAIT_INTERVAL_US 100
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY 50
@@ -729,6 +732,9 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
fdir_input->flow.ip4_flow.ttl :
I40E_FDIR_IP_DEFAULT_TTL;
ip->type_of_service = fdir_input->flow.ip4_flow.tos;
+#ifdef TREX_PATCH
+ ip->packet_id = rte_cpu_to_be_16(fdir_input->flow.ip4_flow.ip_id);
+#endif
/*
* The source and destination fields in the transmitted packet
* need to be presented in a reversed order with respect
@@ -1143,8 +1149,11 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
fdirdp->dtype_cmd_cntindex |=
rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
fdirdp->dtype_cmd_cntindex |=
- rte_cpu_to_le_32(
- ((uint32_t)pf->fdir.match_counter_index <<
+#ifdef TREX_PATCH
+ rte_cpu_to_le_32((fdir_action->stat_count_index <<
+#else
+ rte_cpu_to_le_32((pf->fdir.match_counter_index <<
+#endif
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
@@ -1168,11 +1177,17 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
+#ifndef TREX_PATCH
+ /* itay: moved this delay after the check, to avoid delaying before the first check */
rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+#endif
if ((txdp->cmd_type_offset_bsz &
rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
break;
+#ifdef TREX_PATCH
+ rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
+#endif
}
if (i >= I40E_FDIR_WAIT_COUNT) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
@@ -1180,7 +1195,10 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
return -ETIMEDOUT;
}
/* totally delay 10 ms to check programming status*/
+#ifndef TREX_PATCH
+ /* itay: tests show this is not needed */
rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
+#endif
if (i40e_check_fdir_programming_status(rxq) < 0) {
PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
" programming status reported.");
diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
index d3cfb98f..a6488898 100644
--- a/src/dpdk/drivers/net/i40e/i40e_rxtx.c
+++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.c
@@ -1930,6 +1930,35 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+// TREX_PATCH
+// Based on i40e_pf_get_vsi_by_qindex. Return low latency VSI one queue.
+#define LOW_LATENCY_WORKAROUND
+#ifdef LOW_LATENCY_WORKAROUND
+static struct i40e_vsi*
+i40e_pf_tx_get_vsi_by_qindex(struct i40e_pf *pf, uint16_t queue_idx)
+{
+ // For last queue index, return low latency VSI
+ if (queue_idx == pf->dev_data->nb_tx_queues-1) {
+ return pf->ll_vsi;
+ }
+
+ /* the queue in MAIN VSI range */
+ if (queue_idx < pf->dev_data->nb_tx_queues)
+ return pf->main_vsi;
+
+
+ queue_idx -= pf->main_vsi->nb_qps;
+
+ /* queue_idx is greater than VMDQ VSIs range */
+ if (queue_idx > pf->nb_cfg_vmdq_vsi * pf->vmdq_nb_qps - 1) {
+ PMD_INIT_LOG(ERR, "queue_idx out of range. VMDQ configured?");
+ return NULL;
+ }
+
+ return pf->vmdq[queue_idx / pf->vmdq_nb_qps].vsi;
+}
+#endif
+
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -2165,8 +2194,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct i40e_vf *vf =
I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
vsi = &vf->vsi;
- } else
+ } else {
+ // TREX_PATCH
+#ifdef LOW_LATENCY_WORKAROUND
+ vsi = i40e_pf_tx_get_vsi_by_qindex(pf, queue_idx);
+#else
vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx);
+#endif
+ }
if (vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI not available or queue "
diff --git a/src/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h b/src/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h
index 31cc1bef..06d1ee1c 100644
--- a/src/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/src/dpdk/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -108,7 +108,9 @@ typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
+#ifndef __cplusplus
typedef int bool;
+#endif
#define mb() rte_mb()
#define wmb() rte_wmb()
diff --git a/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
index 861c7cbe..c38ac97b 100644
--- a/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c
@@ -248,8 +248,13 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl)
return -EINVAL;
};
+#define TREX_PATCH
+#ifdef TREX_PATCH
+ *fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT);
+#else
*fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) <<
IXGBE_FDIRCTRL_FLEX_SHIFT;
+#endif
if (conf->mode >= RTE_FDIR_MODE_PERFECT &&
conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) {
@@ -515,7 +520,7 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
uint16_t i;
fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM);
-
+#ifndef TREX_PATCH
if (conf == NULL) {
PMD_DRV_LOG(ERR, "NULL pointer.");
return -EINVAL;
@@ -556,6 +561,11 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
return -EINVAL;
}
}
+#else
+ fdirm &= ~IXGBE_FDIRM_FLEX;
+ flexbytes = 1;
+ // fdirctrl gets flex_bytes_offset in configure_fdir_flags
+#endif
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
info->mask.flex_bytes_mask = flexbytes ? UINT16_MAX : 0;
info->flex_bytes_offset = (uint8_t)((*fdirctrl &
@@ -587,6 +597,9 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X550EM_a &&
mode != RTE_FDIR_MODE_SIGNATURE &&
+#ifdef TREX_PATCH
+ mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+#endif
mode != RTE_FDIR_MODE_PERFECT)
return -ENOSYS;
@@ -1134,11 +1147,14 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
return err;
if (is_perfect) {
+#ifndef TREX_PATCH
+ // No reason not to use IPV6 in perfect filters. It is working.
if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
return -ENOTSUP;
}
+#endif
fdirhash = atr_compute_perfect_hash_82599(&input,
dev->data->dev_conf.fdir_conf.pballoc);
fdirhash |= fdir_filter->soft_id <<
diff --git a/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
index 8a306b06..a018e926 100644
--- a/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -4913,7 +4913,8 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
} while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE));
if (!poll_ms)
- PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d",
+ // TREX_PATCH - changed log level from ERR to DEBUG
+ PMD_INIT_LOG(DEBUG, "Could not disable Rx Queue %d",
rx_queue_id);
rte_delay_us(RTE_IXGBE_WAIT_100_US);
diff --git a/src/dpdk/lib/librte_ether/rte_eth_ctrl.h b/src/dpdk/lib/librte_ether/rte_eth_ctrl.h
index c3a2c9e4..96145e86 100644
--- a/src/dpdk/lib/librte_ether/rte_eth_ctrl.h
+++ b/src/dpdk/lib/librte_ether/rte_eth_ctrl.h
@@ -420,6 +420,8 @@ struct rte_eth_l2_flow {
struct rte_eth_ipv4_flow {
uint32_t src_ip; /**< IPv4 source address in big endian. */
uint32_t dst_ip; /**< IPv4 destination address in big endian. */
+ // TREX_PATCH (ip_id)
+ uint16_t ip_id; /**< IPv4 IP ID to match */
uint8_t tos; /**< Type of service to match. */
uint8_t ttl; /**< Time to live to match. */
uint8_t proto; /**< Protocol, next header in big endian. */
@@ -590,6 +592,9 @@ struct rte_eth_fdir_action {
/**< If report_status is RTE_ETH_FDIR_REPORT_ID_FLEX_4 or
RTE_ETH_FDIR_REPORT_FLEX_8, flex_off specifies where the reported
flex bytes start from in flexible payload. */
+ // TREX_PATCH
+ // Index for statistics counter that will count FDIR matches.
+ uint16_t stat_count_index;
};
/**
diff --git a/src/dpdk/lib/librte_ether/rte_ethdev.c b/src/dpdk/lib/librte_ether/rte_ethdev.c
index 0a6e3f18..47ea4696 100644
--- a/src/dpdk/lib/librte_ether/rte_ethdev.c
+++ b/src/dpdk/lib/librte_ether/rte_ethdev.c
@@ -1479,6 +1479,40 @@ rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
}
}
+// TREX_PATCH
+// return in stats, statistics starting from start, for len counters.
+int
+rte_eth_fdir_stats_get(uint8_t port_id, uint32_t *stats, uint32_t start, uint32_t len)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
+ // Only xl710 support this
+ i40e_trex_fdir_stats_get(dev, stats, start, len);
+
+ return 0;
+}
+
+// TREX_PATCH
+// zero statistics counters, starting from start, for len counters.
+int
+rte_eth_fdir_stats_reset(uint8_t port_id, uint32_t *stats, uint32_t start, uint32_t len)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
+ // Only xl710 support this
+ i40e_trex_fdir_stats_reset(dev, stats, start, len);
+
+ return 0;
+}
+
int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
diff --git a/src/dpdk/lib/librte_ether/rte_ethdev.h b/src/dpdk/lib/librte_ether/rte_ethdev.h
index 4dac364a..6c99c88b 100644
--- a/src/dpdk/lib/librte_ether/rte_ethdev.h
+++ b/src/dpdk/lib/librte_ether/rte_ethdev.h
@@ -767,6 +767,8 @@ struct rte_fdir_conf {
struct rte_eth_fdir_masks mask;
struct rte_eth_fdir_flex_conf flex_conf;
/**< Flex payload configuration. */
+ // TREX_PATCH
+ uint8_t flexbytes_offset;
};
/**
diff --git a/src/dpdk/lib/librte_mbuf/rte_mbuf.h b/src/dpdk/lib/librte_mbuf/rte_mbuf.h
index 101485fb..9e607992 100644
--- a/src/dpdk/lib/librte_mbuf/rte_mbuf.h
+++ b/src/dpdk/lib/librte_mbuf/rte_mbuf.h
@@ -1059,6 +1059,9 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
+ // TREX_PATCH - The code in #if 0 caused tx queue to hang when running:
+ // sudo ./t-rex-64-o -f avl/sfr_delay_10_1g_no_bundeling.yaml -m 35 -p -d 100
+#if 0
/*
* The atomic_add is an expensive operation, so we don't want to
* call it in the case where we know we are the uniq holder of
@@ -1070,7 +1073,7 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
rte_mbuf_refcnt_set(m, 1 + value);
return 1 + value;
}
-
+#endif
return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}