author     Monendra Singh Kushwaha <kmonendra@marvell.com>   2024-04-19 16:34:49 +0530
committer  Damjan Marion <dmarion@0xa5.net>                  2024-05-06 10:06:51 +0000
commit     c2bbc7bc8ce84672a7a47a99f84db9e588486c95
tree       2d101e7b2e402a330ae8a8c6821116654d3c655e
parent     622003c81354b451307fcb11a2296c7a0c60fafb
octeon: add max packet length check
This patch compares the packet length against the maximum supported
packet length and drops packets that exceed it.
Type: fix
Change-Id: I80ef453d43149818936649e1e58ae90c84a34ab9
Signed-off-by: Monendra Singh Kushwaha <kmonendra@marvell.com>
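
Before the full diff below, a standalone sketch of the added logic may help. It is plain C with simplified stand-in names (buf_t, tx_ctx_t, FRAME_SIZE, tx_enq1 are illustrative, not the plugin's vlib_buffer_t, oct_tx_ctx_t, VLIB_FRAME_SIZE, oct_tx_enq1), and it models vlib_buffer_length_in_chain() as a plain length field:

/* Sketch of the added drop path: a packet whose total chain length exceeds
 * the device maximum is set aside instead of being enqueued, then counted
 * and freed after the burst. */
#include <stdio.h>

#define FRAME_SIZE 256 /* stand-in for VLIB_FRAME_SIZE */

typedef struct { unsigned len; } buf_t; /* stand-in for a buffer chain */

typedef struct
{
  unsigned n_exd_mtu;         /* number of packets exceeding the limit */
  buf_t *exd_mtu[FRAME_SIZE]; /* stashed for freeing after the burst */
  int max_pkt_len;            /* the real code reads roc_nix_max_pkt_len () */
} tx_ctx_t;

/* Returns the number of descriptors consumed: 0 when the packet is over
 * the limit, 1 when it is accepted for transmit. */
static int
tx_enq1 (tx_ctx_t *ctx, buf_t *b)
{
  if (b->len > (unsigned) ctx->max_pkt_len)
    {
      ctx->exd_mtu[ctx->n_exd_mtu++] = b;
      return 0;
    }
  return 1;
}

int
main (void)
{
  tx_ctx_t ctx = { .max_pkt_len = 9200 };
  buf_t ok = { .len = 1500 }, too_big = { .len = 9600 };

  tx_enq1 (&ctx, &ok);
  tx_enq1 (&ctx, &too_big);

  /* The node then bumps the MTU_EXCEEDED counter by n_exd_mtu and frees
   * the stashed buffers, so they never reach the hardware queue. */
  printf ("%u packet(s) dropped as mtu exceeded\n", ctx.n_exd_mtu);
  return 0;
}

In the actual patch the stashed buffers are freed with vlib_buffer_free() and reported through the new MTU_EXCEEDED error counter, as the tx_node.c hunks show.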
 src/plugins/dev_octeon/octeon.h  |  3 ++-
 src/plugins/dev_octeon/tx_node.c | 26 +++++++++++++++++++++++++-
 2 files changed, 27 insertions(+), 2 deletions(-)
diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h
index 92ec953ed23..a1280379bf3 100644
--- a/src/plugins/dev_octeon/octeon.h
+++ b/src/plugins/dev_octeon/octeon.h
@@ -162,7 +162,8 @@ vnet_dev_rv_t oct_flow_query (vlib_main_t *, vnet_dev_port_t *, u32, uword,
   _ (AURA_BATCH_ALLOC_ISSUE_FAIL, aura_batch_alloc_issue_fail, ERROR,        \
      "aura batch alloc issue failed")                                        \
   _ (AURA_BATCH_ALLOC_NOT_READY, aura_batch_alloc_not_ready, ERROR,          \
-    "aura batch alloc not ready")
+    "aura batch alloc not ready")                                            \
+  _ (MTU_EXCEEDED, mtu_exceeded, ERROR, "mtu exceeded")
 
 typedef enum
 {
diff --git a/src/plugins/dev_octeon/tx_node.c b/src/plugins/dev_octeon/tx_node.c
index ce8709959a5..a2e4b07de8a 100644
--- a/src/plugins/dev_octeon/tx_node.c
+++ b/src/plugins/dev_octeon/tx_node.c
@@ -22,8 +22,11 @@ typedef struct
   u32 n_tx_bytes;
   u32 n_drop;
   vlib_buffer_t *drop[VLIB_FRAME_SIZE];
+  u32 n_exd_mtu;
+  vlib_buffer_t *exd_mtu[VLIB_FRAME_SIZE];
   u32 batch_alloc_not_ready;
   u32 batch_alloc_issue_fail;
+  int max_pkt_len;
   u16 lmt_id;
   u64 lmt_ioaddr;
   lmt_line_t *lmt_lines;
@@ -149,6 +152,12 @@ oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b,
 	},
   };
 
+  if (PREDICT_FALSE (vlib_buffer_length_in_chain (vm, b) > ctx->max_pkt_len))
+    {
+      ctx->exd_mtu[ctx->n_exd_mtu++] = b;
+      return 0;
+    }
+
   if (!simple && flags & VLIB_BUFFER_NEXT_PRESENT)
     {
       u8 n_tail_segs = 0;
@@ -360,6 +369,8 @@ VNET_DEV_NODE_FN (oct_tx_node)
   vnet_dev_tx_node_runtime_t *rt = vnet_dev_get_tx_node_runtime (node);
   vnet_dev_tx_queue_t *txq = rt->tx_queue;
   oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+  vnet_dev_t *dev = txq->port->dev;
+  oct_device_t *cd = vnet_dev_get_data (dev);
   u32 node_index = node->node_index;
   u32 *from = vlib_frame_vector_args (frame);
   u32 n, n_enq, n_left, n_pkts = frame->n_vectors;
@@ -373,6 +384,7 @@ VNET_DEV_NODE_FN (oct_tx_node)
       .sq = ctq->sq.qid,
       .sizem1 = 1,
     },
+    .max_pkt_len = roc_nix_max_pkt_len (cd->nix),
     .lmt_id = lmt_id,
     .lmt_ioaddr = ctq->io_addr,
     .lmt_lines = ctq->lmt_addr + (lmt_id << ROC_LMT_LINE_SIZE_LOG2),
@@ -406,7 +418,7 @@ VNET_DEV_NODE_FN (oct_tx_node)
 	n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 0);
     }
 
-  ctq->n_enq = n_enq + n - ctx.n_drop;
+  ctq->n_enq = n_enq + n - ctx.n_drop - ctx.n_exd_mtu;
 
   if (n < n_pkts)
     {
@@ -421,6 +433,10 @@ VNET_DEV_NODE_FN (oct_tx_node)
     vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_CHAIN_TOO_LONG,
 		      ctx.n_drop);
 
+  if (PREDICT_FALSE (ctx.n_exd_mtu))
+    vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_MTU_EXCEEDED,
+		      ctx.n_exd_mtu);
+
   if (ctx.batch_alloc_not_ready)
     vlib_error_count (vm, node_index,
 		      OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_NOT_READY,
@@ -441,5 +457,13 @@ VNET_DEV_NODE_FN (oct_tx_node)
       n_pkts -= ctx.n_drop;
     }
 
+  if (PREDICT_FALSE (ctx.n_exd_mtu))
+    {
+      u32 bi[VLIB_FRAME_SIZE];
+      vlib_get_buffer_indices (vm, ctx.exd_mtu, bi, ctx.n_exd_mtu);
+      vlib_buffer_free (vm, bi, ctx.n_exd_mtu);
+      n_pkts -= ctx.n_exd_mtu;
+    }
+
   return n_pkts;
 }
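
The MTU_EXCEEDED entry in octeon.h is added to the same X-macro list that already defines the aura counters; each entry becomes an enum value (OCT_TX_NODE_CTR_*) that the tx node passes to vlib_error_count(). A minimal, self-contained illustration of that X-macro pattern, with simplified stand-in names rather than the plugin's actual list and generated enum:

/* Simplified X-macro counter list: adding one _() line yields both a new
 * enum value and its description string, which is how MTU_EXCEEDED is
 * introduced in octeon.h. */
#include <stdio.h>

#define foreach_tx_counter                                                    \
  _ (CHAIN_TOO_LONG, "drop due to buffer chain too long")                     \
  _ (MTU_EXCEEDED, "mtu exceeded")

typedef enum
{
#define _(name, desc) TX_NODE_CTR_##name,
  foreach_tx_counter
#undef _
    TX_NODE_N_CTRS,
} tx_node_counter_t;

static const char *tx_counter_desc[] = {
#define _(name, desc) [TX_NODE_CTR_##name] = desc,
  foreach_tx_counter
#undef _
};

int
main (void)
{
  /* The tx node increments the counter by the number of offending packets,
   * analogous to vlib_error_count (vm, node_index, ..._MTU_EXCEEDED, n). */
  printf ("counter %d: %s\n", TX_NODE_CTR_MTU_EXCEEDED,
	  tx_counter_desc[TX_NODE_CTR_MTU_EXCEEDED]);
  return 0;
}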