author | Steven Luong <sluong@cisco.com> | 2020-06-05 06:33:25 -0700 |
---|---|---|
committer | Andrew Yourtchenko <ayourtch@gmail.com> | 2020-07-01 10:04:24 +0000 |
commit | f6a8a3a31c34c3c1dadffb257a64fe31faeff9c7 (patch) | |
tree | 41dc4df827ec50327f8fadbf008a8056110b19ef /build/external/patches/dpdk_19.05 | |
parent | 4f3db90a6f6640cd4e9e5a80769b9bd8c07fd558 (diff) | |
build: backporting a dpdk i40e TSO pkt exceeds buffer size patch
The subject patch appears to be critical. Backport it to DPDK_19.05.
Type: fix
Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: Ic25cb8c5798c3218f739c9dd5ce4d70da5782457
Diffstat (limited to 'build/external/patches/dpdk_19.05')
-rw-r--r-- | build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch | 99 |
1 file changed, 99 insertions, 0 deletions
diff --git a/build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch b/build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch
new file mode 100644
index 00000000000..105d60afc60
--- /dev/null
+++ b/build/external/patches/dpdk_19.05/001-net-i40e-fix-TSO-pkt-exceeds-allowed-buf-size-issue.patch
@@ -0,0 +1,99 @@
+From: Xiaoyun Li <xiaoyun.li@intel.com>
+To: qi.z.zhang@intel.com, beilei.xing@intel.com,
+    ciara.loftus@intel.com, dev@dpdk.org
+Cc: Xiaoyun Li <xiaoyun.li@intel.com>, stable@dpdk.org
+Subject: [dpdk-stable] [PATCH v3] net/i40e: fix TSO pkt exceeds allowed buf size issue
+Date: Thu, 26 Dec 2019 14:45:44 +0800
+Message-ID: <20191226064544.48322-1-xiaoyun.li@intel.com> (raw)
+In-Reply-To: <20191223025547.88798-1-xiaoyun.li@intel.com>
+
+Hardware limits that max buffer size per tx descriptor should be
+(16K-1)B. So when TSO enabled, the mbuf data size may exceed the
+limit and cause malicious behavior to the NIC. This patch fixes
+this issue by using more tx descs for this kind of large buffer.
+
+Fixes: 4861cde46116 ("i40e: new poll mode driver")
+Cc: stable@dpdk.org
+
+Signed-off-by: Xiaoyun Li <xiaoyun.li@intel.com>
+---
+v3:
+ * Reused the existing macros to define I40E_MAX_DATA_PER_TXD
+v2:
+ * Each pkt can have several segments so the needed tx descs should sum
+   all segments up.
+---
+ drivers/net/i40e/i40e_rxtx.c | 45 +++++++++++++++++++++++++++++++++++-
+ 1 file changed, 44 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
+index 17dc8c78f..bbdba39b3 100644
+--- a/drivers/net/i40e/i40e_rxtx.c
++++ b/drivers/net/i40e/i40e_rxtx.c
+@@ -989,6 +989,24 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+ 	return ctx_desc;
+ }
+
++/* HW requires that Tx buffer size ranges from 1B up to (16K-1)B. */
++#define I40E_MAX_DATA_PER_TXD \
++	(I40E_TXD_QW1_TX_BUF_SZ_MASK >> I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
++/* Calculate the number of TX descriptors needed for each pkt */
++static inline uint16_t
++i40e_calc_pkt_desc(struct rte_mbuf *tx_pkt)
++{
++	struct rte_mbuf *txd = tx_pkt;
++	uint16_t count = 0;
++
++	while (txd != NULL) {
++		count += DIV_ROUND_UP(txd->data_len, I40E_MAX_DATA_PER_TXD);
++		txd = txd->next;
++	}
++
++	return count;
++}
++
+ uint16_t
+ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ {
+@@ -1046,8 +1064,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 		 * The number of descriptors that must be allocated for
+ 		 * a packet equals to the number of the segments of that
+ 		 * packet plus 1 context descriptor if needed.
++		 * Recalculate the needed tx descs when TSO enabled in case
++		 * the mbuf data size exceeds max data size that hw allows
++		 * per tx desc.
+ 		 */
+-		nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
++		if (ol_flags & PKT_TX_TCP_SEG)
++			nb_used = (uint16_t)(i40e_calc_pkt_desc(tx_pkt) +
++					     nb_ctx);
++		else
++			nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);
+ 		tx_last = (uint16_t)(tx_id + nb_used - 1);
+
+ 		/* Circular ring */
+@@ -1160,6 +1185,24 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+ 			slen = m_seg->data_len;
+ 			buf_dma_addr = rte_mbuf_data_iova(m_seg);
+
++			while ((ol_flags & PKT_TX_TCP_SEG) &&
++				unlikely(slen > I40E_MAX_DATA_PER_TXD)) {
++				txd->buffer_addr =
++					rte_cpu_to_le_64(buf_dma_addr);
++				txd->cmd_type_offset_bsz =
++					i40e_build_ctob(td_cmd,
++					td_offset, I40E_MAX_DATA_PER_TXD,
++					td_tag);
++
++				buf_dma_addr += I40E_MAX_DATA_PER_TXD;
++				slen -= I40E_MAX_DATA_PER_TXD;
++
++				txe->last_id = tx_last;
++				tx_id = txe->next_id;
++				txe = txn;
++				txd = &txr[tx_id];
++				txn = &sw_ring[txe->next_id];
++			}
+ 			PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+ 				"buf_dma_addr: %#"PRIx64";\n"
+ 				"td_cmd: %#x;\n"
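
As a footnote for anyone reviewing or re-doing this backport, the arithmetic the fix relies on is small enough to check by hand. Below is a minimal standalone sketch in plain C with no DPDK headers; mbuf, calc_pkt_desc, split_segment, and MAX_DATA_PER_TXD are hypothetical stand-ins (not DPDK APIs) for rte_mbuf, i40e_calc_pkt_desc, the transmit-loop change, and the (16K-1)B = 16383B cap that the real macro derives from I40E_TXD_QW1_TX_BUF_SZ_MASK.

/*
 * Sketch of the descriptor math behind the fix (plain C; names are
 * stand-ins, not DPDK APIs). Hardware accepts at most (16K-1) = 16383
 * bytes per Tx descriptor, so a TSO packet needs
 * ceil(data_len / 16383) descriptors per mbuf segment, summed over the
 * whole chain.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_DATA_PER_TXD 16383u		/* (16K-1)B hardware cap */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct mbuf {			/* stand-in for struct rte_mbuf */
	uint16_t data_len;	/* bytes in this segment */
	struct mbuf *next;	/* next segment in the chain */
};

/* Sum of per-segment descriptor counts, mirroring i40e_calc_pkt_desc(). */
static uint16_t calc_pkt_desc(const struct mbuf *pkt)
{
	uint16_t count = 0;

	for (; pkt != NULL; pkt = pkt->next)
		count += DIV_ROUND_UP(pkt->data_len, MAX_DATA_PER_TXD);
	return count;
}

/* Mirror of the transmit-loop change: slice one oversized segment into
 * descriptor-sized chunks, advancing the DMA address each iteration. */
static void split_segment(uint64_t dma_addr, uint32_t slen)
{
	while (slen > MAX_DATA_PER_TXD) {
		printf("  desc: addr=%#llx len=%u\n",
		       (unsigned long long)dma_addr, MAX_DATA_PER_TXD);
		dma_addr += MAX_DATA_PER_TXD;
		slen -= MAX_DATA_PER_TXD;
	}
	printf("  desc: addr=%#llx len=%u\n",
	       (unsigned long long)dma_addr, slen);
}

int main(void)
{
	/* A TSO packet chained from two large segments. */
	struct mbuf seg2 = { .data_len = 20000, .next = NULL };
	struct mbuf seg1 = { .data_len = 45000, .next = &seg2 };

	/* Without the fix, nb_segs = 2 descriptors would be reserved;
	 * with it, ceil(45000/16383) + ceil(20000/16383) = 3 + 2 = 5. */
	printf("descriptors needed: %u\n", calc_pkt_desc(&seg1));
	split_segment(0x100000, seg1.data_len);
	return 0;
}

The real transmit loop has to do more than slice the buffer: each extra descriptor consumes a software-ring entry, which is why the backported hunk also rotates tx_id, txe, txd, and txn inside its while loop, and why nb_used must be recomputed up front so the ring-space check reserves enough slots before any descriptor is written.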