author     Ido Barnea <ibarnea@cisco.com>    2017-02-05 16:16:22 +0200
committer  Ido Barnea <ibarnea@cisco.com>    2017-02-13 12:32:25 +0200
commit     3c0de05a5d1951fab4067040be8192f0ee27d9b7 (patch)
tree       3526f80435bac330f70537999f9eafb715a00917 /src
parent     9ca4a157305e4e23a892ba9bafc9eee0f66954ce (diff)
Our patches to dpdk1702 - not including mlx changes
Signed-off-by: Ido Barnea <ibarnea@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--   src/dpdk/drivers/net/enic/enic_ethdev.c                    4
-rw-r--r--   src/dpdk/drivers/net/enic/enic_main.c                      5
-rw-r--r--   src/dpdk/drivers/net/i40e/i40e_ethdev.c                  102
-rw-r--r--   src/dpdk/drivers/net/i40e/i40e_fdir.c                     29
-rw-r--r--   src/dpdk/drivers/net/i40e/i40e_rxtx.c                     40
-rw-r--r--   src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c                  5
-rw-r--r--   src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c                   18
-rw-r--r--   src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c                    3
-rw-r--r--   src/dpdk/lib/librte_eal/linuxapp/eal/eal_interrupts.c      4
-rw-r--r--   src/dpdk/lib/librte_ether/rte_eth_ctrl.h                   7
-rw-r--r--   src/dpdk/lib/librte_ether/rte_ethdev.h                     2
-rw-r--r--   src/dpdk/lib/librte_mbuf/rte_mbuf.h                        5
-rw-r--r--   src/pal/linux_dpdk/dpdk1702/rte_config.h                 368
13 files changed, 574 insertions(+), 18 deletions(-)
diff --git a/src/dpdk/drivers/net/enic/enic_ethdev.c b/src/dpdk/drivers/net/enic/enic_ethdev.c index bffa8700..5e61cfc6 100644 --- a/src/dpdk/drivers/net/enic/enic_ethdev.c +++ b/src/dpdk/drivers/net/enic/enic_ethdev.c @@ -480,6 +480,10 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, device_info->default_rxconf = (struct rte_eth_rxconf) { .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH }; +#define TREX_PATCH +#ifdef TREX_PATCH + device_info->speed_capa = ETH_LINK_SPEED_40G; +#endif } static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) diff --git a/src/dpdk/drivers/net/enic/enic_main.c b/src/dpdk/drivers/net/enic/enic_main.c index 21e8edeb..aece2f05 100644 --- a/src/dpdk/drivers/net/enic/enic_main.c +++ b/src/dpdk/drivers/net/enic/enic_main.c @@ -179,7 +179,12 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated; r_stats->opackets = stats->tx.tx_frames_ok; +#define TREX_PATCH +#ifdef TREX_PATCH + r_stats->ibytes = stats->rx.rx_unicast_bytes_ok+stats->rx.rx_multicast_bytes_ok+stats->rx.rx_broadcast_bytes_ok; +#else r_stats->ibytes = stats->rx.rx_bytes_ok; +#endif r_stats->obytes = stats->tx.tx_bytes_ok; r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c index 4492bcc1..f8ee5a60 100644 --- a/src/dpdk/drivers/net/i40e/i40e_ethdev.c +++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c @@ -704,6 +704,9 @@ static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) #define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 +#define TREX_PATCH +#define TREX_PATCH_LOW_LATENCY + /* * Add a ethertype filter to drop all flow control frames transmitted * from VSIs. 
@@ -2515,9 +2518,11 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) I40E_GLPRT_PTC9522L(hw->port), pf->offset_loaded, &os->tx_size_big, &ns->tx_size_big); +#ifndef TREX_PATCH i40e_stat_update_32(hw, I40E_GLQF_PCNT(pf->fdir.match_counter_index), pf->offset_loaded, &os->fd_sb_match, &ns->fd_sb_match); +#endif /* GLPRT_MSPDC not supported */ /* GLPRT_XEC not supported */ @@ -2543,10 +2548,17 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) pf->main_vsi->eth_stats.rx_multicast + pf->main_vsi->eth_stats.rx_broadcast - pf->main_vsi->eth_stats.rx_discards; +#ifndef TREX_PATCH stats->opackets = pf->main_vsi->eth_stats.tx_unicast + pf->main_vsi->eth_stats.tx_multicast + pf->main_vsi->eth_stats.tx_broadcast; stats->ibytes = ns->eth.rx_bytes; +#else + /* Hanoch: move to global transmit and not pf->vsi and we have two high and low priorty */ + stats->opackets = ns->eth.tx_unicast +ns->eth.tx_multicast +ns->eth.tx_broadcast; + stats->ibytes = pf->main_vsi->eth_stats.rx_bytes; +#endif + stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + pf->main_vsi->eth_stats.tx_errors; @@ -4327,10 +4339,18 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) /* create floating veb if vsi is NULL */ if (vsi != NULL) { ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid, - I40E_DEFAULT_TCMAP, false, +#ifdef TREX_PATCH_LOW_LATENCY + vsi->enabled_tc, false, +#else + I40E_DEFAULT_TCMAP, false, +#endif &veb->seid, false, NULL); } else { +#ifdef TREX_PATCH_LOW_LATENCY + ret = i40e_aq_add_veb(hw, 0, 0, vsi->enabled_tc, +#else ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP, +#endif true, &veb->seid, false, NULL); } @@ -4489,6 +4509,57 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi) return i40e_vsi_add_mac(vsi, &filter); } +#ifdef TREX_PATCH_LOW_LATENCY +static int +i40e_vsi_update_tc_max_bw(struct i40e_vsi *vsi, u16 credit){ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int ret; + + if (!vsi->seid) { + PMD_DRV_LOG(ERR, "seid not valid"); + return -EINVAL; + } + + ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, credit,0, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure TC BW"); + return ret; + } + return (0); +} + +static int +i40e_vsi_update_tc_bandwidth_ex(struct i40e_vsi *vsi) +{ + struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + int i, ret; + struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw_data; + struct i40e_aqc_configure_vsi_tc_bw_data * res_buffer; + + if (!vsi->seid) { + PMD_DRV_LOG(ERR, "seid not valid"); + return -EINVAL; + } + + memset(&tc_bw_data, 0, sizeof(tc_bw_data)); + tc_bw_data.tc_valid_bits = 3; + + /* enable TC 0,1 */ + ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw_data, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to configure TC BW"); + return ret; + } + + vsi->enabled_tc=3; + res_buffer = ( struct i40e_aqc_configure_vsi_tc_bw_data *)&tc_bw_data; + (void)rte_memcpy(vsi->info.qs_handle, res_buffer->qs_handles, + sizeof(vsi->info.qs_handle)); + + return I40E_SUCCESS; +} +#endif + /* * i40e_vsi_get_bw_config - Query VSI BW Information * @vsi: the VSI to be queried @@ -4565,7 +4636,8 @@ i40e_enable_pf_lb(struct i40e_pf *pf) /* Use the FW API if FW >= v5.0 */ if (hw->aq.fw_maj_ver < 5) { - PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback"); + //TREX_PATCH - changed from ERR to INFO. 
Most of our customers do not have latest FW + PMD_INIT_LOG(INFO, "FW < v5.0, cannot enable loopback"); return; } @@ -9831,6 +9903,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, * * Returns 0 on success, negative value on failure */ +//TREX_PATCH - changed all ERR to INFO in below func static int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { @@ -9839,7 +9912,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) int ret = 0; if ((pf->flags & I40E_FLAG_DCB) == 0) { - PMD_INIT_LOG(ERR, "HW doesn't support DCB"); + PMD_INIT_LOG(INFO, "HW doesn't support DCB"); return -ENOTSUP; } @@ -9861,8 +9934,12 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) hw->local_dcbx_config.etscfg.willing = 0; hw->local_dcbx_config.etscfg.maxtcs = 0; hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; - hw->local_dcbx_config.etscfg.tsatable[0] = - I40E_IEEE_TSA_ETS; + hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; +#ifdef TREX_PATCH_LOW_LATENCY + hw->local_dcbx_config.etscfg.tcbwtable[1] = 0; + hw->local_dcbx_config.etscfg.tsatable[1] = I40E_IEEE_TSA_STRICT; + hw->local_dcbx_config.etscfg.prioritytable[1] = 1; +#endif hw->local_dcbx_config.etsrec = hw->local_dcbx_config.etscfg; hw->local_dcbx_config.pfc.willing = 0; @@ -9877,13 +9954,20 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) I40E_APP_PROTOID_FCOE; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, + PMD_INIT_LOG(INFO, "default dcb config fails. err = %d, aq_err = %d.", ret, hw->aq.asq_last_status); return -ENOSYS; } +#ifdef TREX_PATCH_LOW_LATENCY + if (i40e_vsi_update_tc_bandwidth_ex(pf->main_vsi) != + I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to update TC bandwidth"); + return -ENOSYS; + } +#endif } else { - PMD_INIT_LOG(ERR, + PMD_INIT_LOG(INFO, "DCB initialization in FW fails, err = %d, aq_err = %d.", ret, hw->aq.asq_last_status); return -ENOTSUP; @@ -9896,12 +9980,12 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) ret = i40e_init_dcb(hw); if (!ret) { if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) { - PMD_INIT_LOG(ERR, + PMD_INIT_LOG(INFO, "HW doesn't support DCBX offload."); return -ENOTSUP; } } else { - PMD_INIT_LOG(ERR, + PMD_INIT_LOG(INFO, "DCBX configuration failed, err = %d, aq_err = %d.", ret, hw->aq.asq_last_status); return -ENOTSUP; diff --git a/src/dpdk/drivers/net/i40e/i40e_fdir.c b/src/dpdk/drivers/net/i40e/i40e_fdir.c index 0700253b..eddb2b9f 100644 --- a/src/dpdk/drivers/net/i40e/i40e_fdir.c +++ b/src/dpdk/drivers/net/i40e/i40e_fdir.c @@ -74,8 +74,11 @@ #define I40E_FDIR_UDP_DEFAULT_LEN 400 /* Wait count and interval for fdir filter programming */ -#define I40E_FDIR_WAIT_COUNT 10 -#define I40E_FDIR_WAIT_INTERVAL_US 1000 +#define TREX_PATCH +// TREX_PATCH - Values were 10 and 1000. 
These numbers give much better performance when +// configuring large amount of rules +#define I40E_FDIR_WAIT_COUNT 100 +#define I40E_FDIR_WAIT_INTERVAL_US 100 /* Wait count and interval for fdir filter flush */ #define I40E_FDIR_FLUSH_RETRY 50 @@ -751,6 +754,9 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input, fdir_input->flow.ip4_flow.ttl : I40E_FDIR_IP_DEFAULT_TTL; ip->type_of_service = fdir_input->flow.ip4_flow.tos; +#ifdef TREX_PATCH + ip->packet_id = rte_cpu_to_be_16(fdir_input->flow.ip4_flow.ip_id); +#endif /* * The source and destination fields in the transmitted packet * need to be presented in a reversed order with respect @@ -771,7 +777,11 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input, ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW | (fdir_input->flow.ipv6_flow.tc << - I40E_FDIR_IPv6_TC_OFFSET)); + I40E_FDIR_IPv6_TC_OFFSET) +#ifdef TREX_PATCH + | (fdir_input->flow.ipv6_flow.flow_label & 0x000fffff) +#endif + ); ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN); ip6->proto = fdir_input->flow.ipv6_flow.proto ? @@ -1272,8 +1282,12 @@ i40e_fdir_filter_programming(struct i40e_pf *pf, fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); fdirdp->dtype_cmd_cntindex |= +#ifdef TREX_PATCH + rte_cpu_to_le_32((fdir_action->stat_count_index << +#else rte_cpu_to_le_32( ((uint32_t)pf->fdir.match_counter_index << +#endif I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK); @@ -1297,11 +1311,17 @@ i40e_fdir_filter_programming(struct i40e_pf *pf, I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) { +#ifndef TREX_PATCH + /* itay: moved this delay after the check to avoid first check */ rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US); +#endif if ((txdp->cmd_type_offset_bsz & rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) == rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE)) break; +#ifdef TREX_PATCH + rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US); +#endif } if (i >= I40E_FDIR_WAIT_COUNT) { PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" @@ -1309,7 +1329,10 @@ i40e_fdir_filter_programming(struct i40e_pf *pf, return -ETIMEDOUT; } /* totally delay 10 ms to check programming status*/ +#ifndef TREX_PATCH + /* itay: tests show this is not needed */ rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US); +#endif if (i40e_check_fdir_programming_status(rxq) < 0) { PMD_DRV_LOG(ERR, "Failed to program FDIR filter:" " programming status reported."); diff --git a/src/dpdk/drivers/net/i40e/i40e_rxtx.c b/src/dpdk/drivers/net/i40e/i40e_rxtx.c index 608685fa..8aa55eef 100644 --- a/src/dpdk/drivers/net/i40e/i40e_rxtx.c +++ b/src/dpdk/drivers/net/i40e/i40e_rxtx.c @@ -1708,12 +1708,21 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t base, bsf, tc_mapping; int use_def_burst_func = 1; +#define TREX_PATCH_LOW_LATENCY +#ifdef TREX_PATCH_LOW_LATENCY + int is_vf = 0; +#endif + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); vsi = &vf->vsi; - } else +#ifdef TREX_PATCH_LOW_LATENCY + is_vf = 1; +#endif + } else { vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); + } if (vsi == NULL) { PMD_DRV_LOG(ERR, "VSI not available or queue " @@ -1829,6 +1838,12 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev, ad->rx_bulk_alloc_allowed = false; } +#ifdef TREX_PATCH_LOW_LATENCY + if (! 
is_vf) + rxq->dcb_tc =0; + else // The entire for below is in the else +#endif + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->enabled_tc & (1 << i))) continue; @@ -1936,12 +1951,24 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_rs_thresh, tx_free_thresh; uint16_t i, base, bsf, tc_mapping; +#ifdef TREX_PATCH_LOW_LATENCY + u8 low_latency = 0; + int is_vf = 1; +#endif + if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); vsi = &vf->vsi; - } else + } else { vsi = i40e_pf_get_vsi_by_qindex(pf, queue_idx); +#ifdef TREX_PATCH_LOW_LATENCY + if (queue_idx == pf->dev_data->nb_tx_queues-1) { + low_latency = 1; + } + is_vf = 0; +#endif + } if (vsi == NULL) { PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) " @@ -2096,6 +2123,15 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, /* Use a simple TX queue without offloads or multi segs if possible */ i40e_set_tx_function_flag(dev, txq); +#ifdef TREX_PATCH_LOW_LATENCY + if (! is_vf) { + if (low_latency) { + txq->dcb_tc=1; + }else{ + txq->dcb_tc=0; + } + } else // The entire for below is in the else +#endif for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->enabled_tc & (1 << i))) continue; diff --git a/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c b/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c index 5b625a3d..0b988b28 100644 --- a/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/src/dpdk/drivers/net/ixgbe/ixgbe_ethdev.c @@ -6507,12 +6507,17 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev, if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) return -EINVAL; +#define TREX_PATCH +#ifndef TREX_PATCH + // no real reason to block this. + // We configure rules using FDIR and ethertype that point to same queue, so there are no race condition issues. if (filter->ether_type == ETHER_TYPE_IPv4 || filter->ether_type == ETHER_TYPE_IPv6) { PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in" " ethertype filter.", filter->ether_type); return -EINVAL; } +#endif if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) { PMD_DRV_LOG(ERR, "mac compare is unsupported."); diff --git a/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c b/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c index 3b9d60ca..61b36d79 100644 --- a/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c +++ b/src/dpdk/drivers/net/ixgbe/ixgbe_fdir.c @@ -247,8 +247,13 @@ configure_fdir_flags(const struct rte_fdir_conf *conf, uint32_t *fdirctrl) return -EINVAL; }; +#define TREX_PATCH +#ifdef TREX_PATCH + *fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT); +#else *fdirctrl |= (IXGBE_DEFAULT_FLEXBYTES_OFFSET / sizeof(uint16_t)) << IXGBE_FDIRCTRL_FLEX_SHIFT; +#endif if (conf->mode >= RTE_FDIR_MODE_PERFECT && conf->mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) { @@ -564,7 +569,7 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, uint16_t i; fdirm = IXGBE_READ_REG(hw, IXGBE_FDIRM); - +#ifndef TREX_PATCH if (conf == NULL) { PMD_DRV_LOG(ERR, "NULL pointer."); return -EINVAL; @@ -605,6 +610,11 @@ ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev, return -EINVAL; } } +#else + fdirm &= ~IXGBE_FDIRM_FLEX; + flexbytes = 1; + // fdirctrl gets flex_bytes_offset in configure_fdir_flags +#endif IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); info->mask.flex_bytes_mask = flexbytes ? 
UINT16_MAX : 0; info->flex_bytes_offset = (uint8_t)((*fdirctrl & @@ -636,6 +646,9 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev) hw->mac.type != ixgbe_mac_X550EM_x && hw->mac.type != ixgbe_mac_X550EM_a && mode != RTE_FDIR_MODE_SIGNATURE && +#ifdef TREX_PATCH + mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN && +#endif mode != RTE_FDIR_MODE_PERFECT) return -ENOSYS; @@ -1255,12 +1268,15 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev, is_perfect = TRUE; if (is_perfect) { +#ifndef TREX_PATCH + // No reason not to use IPV6 in perfect filters. It is working. if (rule->ixgbe_fdir.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) { PMD_DRV_LOG(ERR, "IPv6 is not supported in" " perfect mode!"); return -ENOTSUP; } +#endif fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir, dev->data->dev_conf.fdir_conf.pballoc); fdirhash |= rule->soft_id << diff --git a/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c b/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c index 36f1c020..f4fb3582 100644 --- a/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/src/dpdk/drivers/net/ixgbe/ixgbe_rxtx.c @@ -4969,7 +4969,8 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", + // TREX_PATCH - changed log level from ERR to DEBUG + PMD_INIT_LOG(DEBUG, "Could not disable Rx Queue %d", rx_queue_id); rte_delay_us(RTE_IXGBE_WAIT_100_US); diff --git a/src/dpdk/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/src/dpdk/lib/librte_eal/linuxapp/eal/eal_interrupts.c index b5b3f2bd..c9bf3b8d 100644 --- a/src/dpdk/lib/librte_eal/linuxapp/eal/eal_interrupts.c +++ b/src/dpdk/lib/librte_eal/linuxapp/eal/eal_interrupts.c @@ -732,11 +732,13 @@ eal_intr_process_interrupts(struct epoll_event *events, int nfds) if (bytes_read < 0) { if (errno == EINTR || errno == EWOULDBLOCK) continue; - +#if 0 +// TREX_PATCH - this causes endless messages on e1000 with dpdk1702 RTE_LOG(ERR, EAL, "Error reading from file " "descriptor %d: %s\n", events[n].data.fd, strerror(errno)); +#endif } else if (bytes_read == 0) RTE_LOG(ERR, EAL, "Read nothing from file " "descriptor %d\n", events[n].data.fd); diff --git a/src/dpdk/lib/librte_ether/rte_eth_ctrl.h b/src/dpdk/lib/librte_ether/rte_eth_ctrl.h index 83869042..9f1dd32b 100644 --- a/src/dpdk/lib/librte_ether/rte_eth_ctrl.h +++ b/src/dpdk/lib/librte_ether/rte_eth_ctrl.h @@ -425,6 +425,8 @@ struct rte_eth_l2_flow { struct rte_eth_ipv4_flow { uint32_t src_ip; /**< IPv4 source address in big endian. */ uint32_t dst_ip; /**< IPv4 destination address in big endian. */ + // TREX_PATCH (ip_id) + uint16_t ip_id; /**< IPv4 IP ID to match */ uint8_t tos; /**< Type of service to match. */ uint8_t ttl; /**< Time to live to match. */ uint8_t proto; /**< Protocol, next header in big endian. */ @@ -467,6 +469,8 @@ struct rte_eth_ipv6_flow { uint8_t tc; /**< Traffic class to match. */ uint8_t proto; /**< Protocol, next header to match. */ uint8_t hop_limits; /**< Hop limits to match. */ + // TREX_PATCH (flow_label) + uint32_t flow_label; /**<flow label to match. */ }; /** @@ -595,6 +599,9 @@ struct rte_eth_fdir_action { /**< If report_status is RTE_ETH_FDIR_REPORT_ID_FLEX_4 or RTE_ETH_FDIR_REPORT_FLEX_8, flex_off specifies where the reported flex bytes start from in flexible payload. */ + // TREX_PATCH + // Index for statistics counter that will count FDIR matches. 
+ uint16_t stat_count_index; }; /** diff --git a/src/dpdk/lib/librte_ether/rte_ethdev.h b/src/dpdk/lib/librte_ether/rte_ethdev.h index c17bbda8..ce8d805c 100644 --- a/src/dpdk/lib/librte_ether/rte_ethdev.h +++ b/src/dpdk/lib/librte_ether/rte_ethdev.h @@ -797,6 +797,8 @@ struct rte_fdir_conf { struct rte_eth_fdir_masks mask; struct rte_eth_fdir_flex_conf flex_conf; /**< Flex payload configuration. */ + // TREX_PATCH + uint8_t flexbytes_offset; }; /** diff --git a/src/dpdk/lib/librte_mbuf/rte_mbuf.h b/src/dpdk/lib/librte_mbuf/rte_mbuf.h index 0d01167c..7144b8d8 100644 --- a/src/dpdk/lib/librte_mbuf/rte_mbuf.h +++ b/src/dpdk/lib/librte_mbuf/rte_mbuf.h @@ -689,6 +689,9 @@ rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value) static inline uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) { + // TREX_PATCH - The code in #if 0 caused tx queue to hang when running: + // sudo ./t-rex-64-o -f avl/sfr_delay_10_1g_no_bundeling.yaml -m 35 -p -d 100 +#if 0 /* * The atomic_add is an expensive operation, so we don't want to * call it in the case where we know we are the uniq holder of @@ -700,7 +703,7 @@ rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) rte_mbuf_refcnt_set(m, 1 + value); return 1 + value; } - +#endif return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value)); } diff --git a/src/pal/linux_dpdk/dpdk1702/rte_config.h b/src/pal/linux_dpdk/dpdk1702/rte_config.h new file mode 100644 index 00000000..3e27dc88 --- /dev/null +++ b/src/pal/linux_dpdk/dpdk1702/rte_config.h @@ -0,0 +1,368 @@ +#ifndef __RTE_CONFIG_H +#define __RTE_CONFIG_H +#undef RTE_EXEC_ENV +#define RTE_EXEC_ENV "linuxapp" +#undef RTE_ARCH +#define RTE_ARCH "x86_64" +#undef RTE_MACHINE +#define RTE_MACHINE "native" +#undef RTE_TOOLCHAIN +#define RTE_TOOLCHAIN "gcc" +#undef RTE_FORCE_INTRINSICS +#undef RTE_ARCH_STRICT_ALIGN +#undef RTE_BUILD_SHARED_LIB +#undef RTE_NEXT_ABI +#define RTE_NEXT_ABI 1 +#undef RTE_CACHE_LINE_SIZE +#define RTE_CACHE_LINE_SIZE 64 +#undef RTE_LIBRTE_EAL +#define RTE_LIBRTE_EAL 1 +#undef RTE_MAX_LCORE +#define RTE_MAX_LCORE 128 +#undef RTE_MAX_NUMA_NODES +#define RTE_MAX_NUMA_NODES 8 +#undef RTE_MAX_MEMSEG +#define RTE_MAX_MEMSEG 256 +#undef RTE_MAX_MEMZONE +#define RTE_MAX_MEMZONE 2560 +#undef RTE_MAX_TAILQ +#define RTE_MAX_TAILQ 32 +#undef RTE_LOG_LEVEL +#define RTE_LOG_LEVEL RTE_LOG_INFO +#undef RTE_LOG_DP_LEVEL +#define RTE_LOG_DP_LEVEL RTE_LOG_INFO +#undef RTE_LOG_HISTORY +#define RTE_LOG_HISTORY 256 +#undef RTE_LIBEAL_USE_HPET +#undef RTE_EAL_ALLOW_INV_SOCKET_ID +#undef RTE_EAL_ALWAYS_PANIC_ON_ERROR +#undef RTE_EAL_IGB_UIO +#define RTE_EAL_IGB_UIO 1 +#undef RTE_EAL_VFIO +#define RTE_EAL_VFIO 1 +#undef RTE_MALLOC_DEBUG +#undef RTE_EAL_PMD_PATH +#define RTE_EAL_PMD_PATH "" +#undef RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT +#define RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT 1 +#undef RTE_LIBRTE_KVARGS +#define RTE_LIBRTE_KVARGS 1 +#undef RTE_LIBRTE_ETHER +#define RTE_LIBRTE_ETHER 1 +#undef RTE_LIBRTE_ETHDEV_DEBUG +#undef RTE_MAX_ETHPORTS +#define RTE_MAX_ETHPORTS 32 +#undef RTE_MAX_QUEUES_PER_PORT +#define RTE_MAX_QUEUES_PER_PORT 1024 +#undef RTE_LIBRTE_IEEE1588 +#undef RTE_ETHDEV_QUEUE_STAT_CNTRS +#define RTE_ETHDEV_QUEUE_STAT_CNTRS 16 +#undef RTE_ETHDEV_RXTX_CALLBACKS +// TREX_PATCH RTE_ETHDEV_RXTX_CALLBACKS used to be 1 +#undef RTE_ETHDEV_TX_PREPARE_NOOP +#undef RTE_NIC_BYPASS +#undef RTE_LIBRTE_ENA_PMD +#define RTE_LIBRTE_ENA_PMD 1 +#undef RTE_LIBRTE_ENA_DEBUG_RX +#undef RTE_LIBRTE_ENA_DEBUG_TX +#undef RTE_LIBRTE_ENA_DEBUG_TX_FREE +#undef RTE_LIBRTE_ENA_DEBUG_DRIVER 
+#undef RTE_LIBRTE_ENA_COM_DEBUG +#undef RTE_LIBRTE_EM_PMD +#define RTE_LIBRTE_EM_PMD 1 +#undef RTE_LIBRTE_IGB_PMD +#define RTE_LIBRTE_IGB_PMD 1 +#undef RTE_LIBRTE_E1000_DEBUG_INIT +#undef RTE_LIBRTE_E1000_DEBUG_RX +#undef RTE_LIBRTE_E1000_DEBUG_TX +#undef RTE_LIBRTE_E1000_DEBUG_TX_FREE +#undef RTE_LIBRTE_E1000_DEBUG_DRIVER +#undef RTE_LIBRTE_E1000_PF_DISABLE_STRIP_CRC +#undef RTE_LIBRTE_IXGBE_PMD +#define RTE_LIBRTE_IXGBE_PMD 1 +#undef RTE_LIBRTE_IXGBE_DEBUG_INIT +#undef RTE_LIBRTE_IXGBE_DEBUG_RX +#undef RTE_LIBRTE_IXGBE_DEBUG_TX +#undef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE +#undef RTE_LIBRTE_IXGBE_DEBUG_DRIVER +#undef RTE_LIBRTE_IXGBE_PF_DISABLE_STRIP_CRC +#undef RTE_IXGBE_INC_VECTOR +#define RTE_IXGBE_INC_VECTOR 1 +#undef RTE_IXGBE_RX_OLFLAGS_ENABLE +#define RTE_IXGBE_RX_OLFLAGS_ENABLE 1 +#undef RTE_LIBRTE_I40E_PMD +#define RTE_LIBRTE_I40E_PMD 1 +#undef RTE_LIBRTE_I40E_DEBUG_INIT +#undef RTE_LIBRTE_I40E_DEBUG_RX +#undef RTE_LIBRTE_I40E_DEBUG_TX +#undef RTE_LIBRTE_I40E_DEBUG_TX_FREE +#undef RTE_LIBRTE_I40E_DEBUG_DRIVER +#undef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC +#define RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC 1 +#undef RTE_LIBRTE_I40E_INC_VECTOR +#define RTE_LIBRTE_I40E_INC_VECTOR 1 +#undef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE +#define RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE 1 +#undef RTE_LIBRTE_I40E_16BYTE_RX_DESC +#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF +#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF 64 +#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF +#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4 +#undef RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM +#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM 4 +#undef RTE_LIBRTE_I40E_ITR_INTERVAL +#define RTE_LIBRTE_I40E_ITR_INTERVAL -1 +#undef RTE_LIBRTE_FM10K_PMD +#define RTE_LIBRTE_FM10K_PMD 1 +#undef RTE_LIBRTE_FM10K_DEBUG_INIT +#undef RTE_LIBRTE_FM10K_DEBUG_RX +#undef RTE_LIBRTE_FM10K_DEBUG_TX +#undef RTE_LIBRTE_FM10K_DEBUG_TX_FREE +#undef RTE_LIBRTE_FM10K_DEBUG_DRIVER +#undef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE +#define RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE 1 +#undef RTE_LIBRTE_FM10K_INC_VECTOR +#define RTE_LIBRTE_FM10K_INC_VECTOR 1 +#undef RTE_LIBRTE_MLX4_PMD +#undef RTE_LIBRTE_MLX4_DEBUG +#undef RTE_LIBRTE_MLX4_SGE_WR_N +#define RTE_LIBRTE_MLX4_SGE_WR_N 4 +#undef RTE_LIBRTE_MLX4_MAX_INLINE +#define RTE_LIBRTE_MLX4_MAX_INLINE 0 +#undef RTE_LIBRTE_MLX4_TX_MP_CACHE +#define RTE_LIBRTE_MLX4_TX_MP_CACHE 8 +#undef RTE_LIBRTE_MLX4_SOFT_COUNTERS +#define RTE_LIBRTE_MLX4_SOFT_COUNTERS 1 +#undef RTE_LIBRTE_MLX5_PMD +#undef RTE_LIBRTE_MLX5_DEBUG +#undef RTE_LIBRTE_MLX5_TX_MP_CACHE +#define RTE_LIBRTE_MLX5_TX_MP_CACHE 8 +#undef RTE_LIBRTE_BNX2X_PMD +#undef RTE_LIBRTE_BNX2X_DEBUG +#undef RTE_LIBRTE_BNX2X_DEBUG_INIT +#undef RTE_LIBRTE_BNX2X_DEBUG_RX +#undef RTE_LIBRTE_BNX2X_DEBUG_TX +#undef RTE_LIBRTE_BNX2X_MF_SUPPORT +#undef RTE_LIBRTE_BNX2X_DEBUG_PERIODIC +#undef RTE_LIBRTE_CXGBE_PMD +#define RTE_LIBRTE_CXGBE_PMD 1 +#undef RTE_LIBRTE_CXGBE_DEBUG +#undef RTE_LIBRTE_CXGBE_DEBUG_REG +#undef RTE_LIBRTE_CXGBE_DEBUG_MBOX +#undef RTE_LIBRTE_CXGBE_DEBUG_TX +#undef RTE_LIBRTE_CXGBE_DEBUG_RX +#undef RTE_LIBRTE_ENIC_PMD +#define RTE_LIBRTE_ENIC_PMD 1 +#undef RTE_LIBRTE_ENIC_DEBUG +#undef RTE_LIBRTE_NFP_PMD +#define RTE_LIBRTE_NFP_PMD 1 +#undef RTE_LIBRTE_NFP_DEBUG +#undef RTE_LIBRTE_BNXT_PMD +#define RTE_LIBRTE_BNXT_PMD 1 +#undef RTE_LIBRTE_SFC_EFX_PMD +#define RTE_LIBRTE_SFC_EFX_PMD 1 +#undef RTE_LIBRTE_SFC_EFX_DEBUG +#undef RTE_LIBRTE_SFC_EFX_TSO +#undef RTE_LIBRTE_PMD_SZEDATA2 +#undef RTE_LIBRTE_PMD_SZEDATA2_AS +#define RTE_LIBRTE_PMD_SZEDATA2_AS 0 +#undef RTE_LIBRTE_THUNDERX_NICVF_PMD +#undef 
RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT +#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX +#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX +#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER +#undef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX +#undef RTE_LIBRTE_VIRTIO_PMD +#define RTE_LIBRTE_VIRTIO_PMD 1 +#undef RTE_LIBRTE_VIRTIO_DEBUG_INIT +#undef RTE_LIBRTE_VIRTIO_DEBUG_RX +#undef RTE_LIBRTE_VIRTIO_DEBUG_TX +#undef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER +#undef RTE_LIBRTE_VIRTIO_DEBUG_DUMP +#undef RTE_VIRTIO_USER +#define RTE_VIRTIO_USER 1 +#undef RTE_LIBRTE_VMXNET3_PMD +#define RTE_LIBRTE_VMXNET3_PMD 1 +#undef RTE_LIBRTE_VMXNET3_DEBUG_INIT +#undef RTE_LIBRTE_VMXNET3_DEBUG_RX +#undef RTE_LIBRTE_VMXNET3_DEBUG_TX +#undef RTE_LIBRTE_VMXNET3_DEBUG_TX_FREE +#undef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER +#undef RTE_LIBRTE_PMD_RING +#define RTE_LIBRTE_PMD_RING 1 +#undef RTE_PMD_RING_MAX_RX_RINGS +#define RTE_PMD_RING_MAX_RX_RINGS 16 +#undef RTE_PMD_RING_MAX_TX_RINGS +#define RTE_PMD_RING_MAX_TX_RINGS 16 +#undef RTE_LIBRTE_PMD_PCAP +#undef RTE_LIBRTE_PMD_BOND +#define RTE_LIBRTE_PMD_BOND 1 +#undef RTE_LIBRTE_BOND_DEBUG_ALB +#undef RTE_LIBRTE_BOND_DEBUG_ALB_L1 +#undef RTE_LIBRTE_QEDE_PMD +#define RTE_LIBRTE_QEDE_PMD 1 +#undef RTE_LIBRTE_QEDE_DEBUG_INIT +#undef RTE_LIBRTE_QEDE_DEBUG_INFO +#undef RTE_LIBRTE_QEDE_DEBUG_DRIVER +#undef RTE_LIBRTE_QEDE_DEBUG_TX +#undef RTE_LIBRTE_QEDE_DEBUG_RX +#undef RTE_LIBRTE_QEDE_FW +#define RTE_LIBRTE_QEDE_FW "" +#undef RTE_LIBRTE_PMD_AF_PACKET +#define RTE_LIBRTE_PMD_AF_PACKET 1 +#undef RTE_LIBRTE_PMD_TAP +#define RTE_LIBRTE_PMD_TAP 1 +#undef RTE_LIBRTE_PMD_XENVIRT +#undef RTE_LIBRTE_PMD_NULL +#define RTE_LIBRTE_PMD_NULL 1 +#undef RTE_PMD_PACKET_PREFETCH +#define RTE_PMD_PACKET_PREFETCH 1 +#undef RTE_LIBRTE_CRYPTODEV +#define RTE_LIBRTE_CRYPTODEV 1 +#undef RTE_LIBRTE_CRYPTODEV_DEBUG +#undef RTE_CRYPTO_MAX_DEVS +#define RTE_CRYPTO_MAX_DEVS 64 +#undef RTE_CRYPTODEV_NAME_LEN +#define RTE_CRYPTODEV_NAME_LEN 64 +#undef RTE_LIBRTE_PMD_ARMV8_CRYPTO +#undef RTE_LIBRTE_PMD_ARMV8_CRYPTO_DEBUG +#undef RTE_LIBRTE_PMD_QAT +#undef RTE_LIBRTE_PMD_QAT_DEBUG_INIT +#undef RTE_LIBRTE_PMD_QAT_DEBUG_TX +#undef RTE_LIBRTE_PMD_QAT_DEBUG_RX +#undef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER +#undef RTE_QAT_PMD_MAX_NB_SESSIONS +#define RTE_QAT_PMD_MAX_NB_SESSIONS 2048 +#undef RTE_LIBRTE_PMD_AESNI_MB +#undef RTE_LIBRTE_PMD_AESNI_MB_DEBUG +#undef RTE_LIBRTE_PMD_OPENSSL +#undef RTE_LIBRTE_PMD_OPENSSL_DEBUG +#undef RTE_LIBRTE_PMD_AESNI_GCM +#undef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG +#undef RTE_LIBRTE_PMD_SNOW3G +#undef RTE_LIBRTE_PMD_SNOW3G_DEBUG +#undef RTE_LIBRTE_PMD_KASUMI +#undef RTE_LIBRTE_PMD_KASUMI_DEBUG +#undef RTE_LIBRTE_PMD_ZUC +#undef RTE_LIBRTE_PMD_ZUC_DEBUG +#undef RTE_LIBRTE_PMD_NULL_CRYPTO +#define RTE_LIBRTE_PMD_NULL_CRYPTO 1 +#undef RTE_LIBRTE_RING +#define RTE_LIBRTE_RING 1 +#undef RTE_LIBRTE_RING_DEBUG +#undef RTE_RING_SPLIT_PROD_CONS +#undef RTE_RING_PAUSE_REP_COUNT +#define RTE_RING_PAUSE_REP_COUNT 0 +#undef RTE_LIBRTE_MEMPOOL +#define RTE_LIBRTE_MEMPOOL 1 +#undef RTE_MEMPOOL_CACHE_MAX_SIZE +#define RTE_MEMPOOL_CACHE_MAX_SIZE 512 +#undef RTE_LIBRTE_MEMPOOL_DEBUG +#undef RTE_LIBRTE_MBUF +#define RTE_LIBRTE_MBUF 1 +#undef RTE_LIBRTE_MBUF_DEBUG +#undef RTE_MBUF_DEFAULT_MEMPOOL_OPS +#define RTE_MBUF_DEFAULT_MEMPOOL_OPS "ring_mp_mc" +#undef RTE_MBUF_REFCNT_ATOMIC +#define RTE_MBUF_REFCNT_ATOMIC 1 +#undef RTE_PKTMBUF_HEADROOM +// TREX_PATCH: DPDK original value is 128 here. This creates big overhead of memory. +// We would like to put 0, but it cuases compilation issues with virtio driver. 
+// 16 caused big performance degradation because of alignment issues. So 64 is the winner. +#define RTE_PKTMBUF_HEADROOM 64 +#undef RTE_LIBRTE_TIMER +#define RTE_LIBRTE_TIMER 1 +#undef RTE_LIBRTE_TIMER_DEBUG +#undef RTE_LIBRTE_CFGFILE +#define RTE_LIBRTE_CFGFILE 1 +#undef RTE_LIBRTE_CMDLINE +#define RTE_LIBRTE_CMDLINE 1 +#undef RTE_LIBRTE_CMDLINE_DEBUG +#undef RTE_LIBRTE_HASH +#define RTE_LIBRTE_HASH 1 +#undef RTE_LIBRTE_HASH_DEBUG +#undef RTE_LIBRTE_EFD +#define RTE_LIBRTE_EFD 1 +#undef RTE_LIBRTE_JOBSTATS +#define RTE_LIBRTE_JOBSTATS 1 +#undef RTE_LIBRTE_LPM +#define RTE_LIBRTE_LPM 1 +#undef RTE_LIBRTE_LPM_DEBUG +#undef RTE_LIBRTE_ACL +#define RTE_LIBRTE_ACL 1 +#undef RTE_LIBRTE_ACL_DEBUG +#undef RTE_LIBRTE_POWER +#define RTE_LIBRTE_POWER 1 +#undef RTE_LIBRTE_POWER_DEBUG +#undef RTE_MAX_LCORE_FREQS +#define RTE_MAX_LCORE_FREQS 64 +#undef RTE_LIBRTE_NET +#define RTE_LIBRTE_NET 1 +#undef RTE_LIBRTE_IP_FRAG +#define RTE_LIBRTE_IP_FRAG 1 +#undef RTE_LIBRTE_IP_FRAG_DEBUG +#undef RTE_LIBRTE_IP_FRAG_MAX_FRAG +#define RTE_LIBRTE_IP_FRAG_MAX_FRAG 4 +#undef RTE_LIBRTE_IP_FRAG_TBL_STAT +#undef RTE_LIBRTE_METER +#define RTE_LIBRTE_METER 1 +#undef RTE_LIBRTE_SCHED +#define RTE_LIBRTE_SCHED 1 +#undef RTE_SCHED_DEBUG +#undef RTE_SCHED_RED +#undef RTE_SCHED_COLLECT_STATS +#undef RTE_SCHED_SUBPORT_TC_OV +#undef RTE_SCHED_PORT_N_GRINDERS +#define RTE_SCHED_PORT_N_GRINDERS 8 +#undef RTE_SCHED_VECTOR +#undef RTE_LIBRTE_DISTRIBUTOR +#define RTE_LIBRTE_DISTRIBUTOR 1 +#undef RTE_LIBRTE_REORDER +#define RTE_LIBRTE_REORDER 1 +#undef RTE_LIBRTE_PORT +#define RTE_LIBRTE_PORT 1 +#undef RTE_PORT_STATS_COLLECT +#undef RTE_PORT_PCAP +#undef RTE_LIBRTE_TABLE +#define RTE_LIBRTE_TABLE 1 +#undef RTE_TABLE_STATS_COLLECT +#undef RTE_LIBRTE_PIPELINE +#define RTE_LIBRTE_PIPELINE 1 +#undef RTE_PIPELINE_STATS_COLLECT +#undef RTE_LIBRTE_KNI +#define RTE_LIBRTE_KNI 1 +#undef RTE_KNI_KMOD +#define RTE_KNI_KMOD 1 +#undef RTE_KNI_PREEMPT_DEFAULT +#define RTE_KNI_PREEMPT_DEFAULT 1 +#undef RTE_KNI_VHOST +#undef RTE_KNI_VHOST_MAX_CACHE_SIZE +#define RTE_KNI_VHOST_MAX_CACHE_SIZE 1024 +#undef RTE_KNI_VHOST_VNET_HDR_EN +#undef RTE_LIBRTE_PDUMP +#define RTE_LIBRTE_PDUMP 1 +#undef RTE_LIBRTE_VHOST +#define RTE_LIBRTE_VHOST 1 +#undef RTE_LIBRTE_VHOST_NUMA +#undef RTE_LIBRTE_VHOST_DEBUG +#undef RTE_LIBRTE_PMD_VHOST +#define RTE_LIBRTE_PMD_VHOST 1 +#undef RTE_LIBRTE_XEN_DOM0 +#undef RTE_APP_TEST +#define RTE_APP_TEST 1 +#undef RTE_APP_TEST_RESOURCE_TAR +#undef RTE_TEST_PMD +#define RTE_TEST_PMD 1 +#undef RTE_TEST_PMD_RECORD_CORE_CYCLES +#undef RTE_TEST_PMD_RECORD_BURST_STATS +#undef RTE_EXEC_ENV_LINUXAPP +#define RTE_EXEC_ENV_LINUXAPP 1 +#undef RTE_ARCH_X86_64 +#define RTE_ARCH_X86_64 1 +#undef RTE_ARCH_X86 +#define RTE_ARCH_X86 1 +#undef RTE_ARCH_64 +#define RTE_ARCH_64 1 +#undef RTE_TOOLCHAIN_GCC +#define RTE_TOOLCHAIN_GCC 1 +#endif /* __RTE_CONFIG_H */ |