path: root/src/vnet/pg/input.c
author    Mohsin Kazmi <sykazmi@cisco.com>    2021-02-10 11:26:24 +0100
committer Damjan Marion <dmarion@me.com>      2021-02-15 20:32:56 +0000
commit    6809538e646bf86c000dc1faba60b0a4157ad898 (patch)
tree      18a228b96226932381f15e44b4972636de1c7fe1 /src/vnet/pg/input.c
parent    99c6dc6a7a36c0be95da9afb3ad8830b24754d4e (diff)
vlib: refactor checksum offload support
Type: refactor

This patch refactors the offload flags in vlib_buffer_t. There are two main
reasons behind this refactoring. First, the existing offload flags cannot
represent both outer and inner header offloads. Second, room for these flags
in the first cacheline of vlib_buffer_t is limited. This patch introduces a
generic offload flag in the first cacheline, and detailed offload flags in the
second cacheline of the structure for performance optimization.

Change-Id: Icc363a142fb9208ec7113ab5bbfc8230181f6004
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
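For context, below is a minimal sketch (not part of the commit) of the usage
pattern this refactor introduces, as suggested by the diff further down: a
node accumulates the detailed VNET_BUFFER_OFFLOAD_F_* bits and installs them
with vnet_buffer_offload_flags_set(), which is assumed here to also raise the
generic first-cacheline VNET_BUFFER_F_OFFLOAD bit that consumers test. The
helper names request_ip4_tcp_csum_offload() and has_offload() are
illustrative only.

#include <vlib/vlib.h>
#include <vnet/vnet.h>

/* Request IPv4 + TCP checksum offload for one buffer. The detailed flags are
 * assumed to live in the buffer's second cacheline and to be set through the
 * vnet_buffer_offload_flags_set() helper seen in the diff below. */
static_always_inline void
request_ip4_tcp_csum_offload (vlib_buffer_t *b)
{
  u32 oflags =
    VNET_BUFFER_OFFLOAD_F_IP_CKSUM | VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;

  vnet_buffer_offload_flags_set (b, oflags);
}

/* Consumers (drivers, the gso node, ...) can then cheaply test the single
 * generic bit in the first cacheline instead of three per-protocol flags. */
static_always_inline int
has_offload (vlib_buffer_t *b)
{
  return (b->flags & VNET_BUFFER_F_OFFLOAD) != 0;
}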
Diffstat (limited to 'src/vnet/pg/input.c')
-rw-r--r--  src/vnet/pg/input.c  31
1 file changed, 18 insertions, 13 deletions
diff --git a/src/vnet/pg/input.c b/src/vnet/pg/input.c
index 940bf124c56..d3501b3e3b6 100644
--- a/src/vnet/pg/input.c
+++ b/src/vnet/pg/input.c
@@ -1537,13 +1537,14 @@ pg_input_trace (pg_main_t * pg,
}
static_always_inline void
-fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
- int gso_enabled, u32 gso_size)
+fill_buffer_offload_flags (vlib_main_t *vm, u32 *buffers, u32 n_buffers,
+ u32 buffer_oflags, int gso_enabled, u32 gso_size)
{
for (int i = 0; i < n_buffers; i++)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
u8 l4_proto = 0;
+ u32 oflags = 0;
ethernet_header_t *eh =
(ethernet_header_t *) vlib_buffer_get_current (b0);
@@ -1574,10 +1575,11 @@ fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
l4_proto = ip4->protocol;
b0->flags |=
- (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
- b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
- | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
- VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+ (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+ if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
+ oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
}
else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
{
@@ -1594,7 +1596,8 @@ fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
if (l4_proto == IP_PROTOCOL_TCP)
{
- b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
+ oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
/* only set GSO flag for chained buffers */
if (gso_enabled && (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
@@ -1609,8 +1612,12 @@ fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
}
else if (l4_proto == IP_PROTOCOL_UDP)
{
- b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
+ oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
}
+
+ if (oflags)
+ vnet_buffer_offload_flags_set (b0, oflags);
}
}
@@ -1710,13 +1717,11 @@ pg_generate_packets (vlib_node_runtime_t * node,
vnet_buffer (b)->feature_arc_index = feature_arc_index;
}
- if (pi->gso_enabled ||
- (s->buffer_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
- VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
- VNET_BUFFER_F_OFFLOAD_IP_CKSUM)))
+ if (pi->gso_enabled || (s->buffer_flags & VNET_BUFFER_F_OFFLOAD))
{
fill_buffer_offload_flags (vm, to_next, n_this_frame,
- pi->gso_enabled, pi->gso_size);
+ s->buffer_oflags, pi->gso_enabled,
+ pi->gso_size);
}
n_trace = vlib_get_trace_count (vm, node);