summaryrefslogtreecommitdiffstats
path: root/src/vnet/pg/input.c
diff options
context:
space:
mode:
authorVladimir Isaev <visaev@netgate.com>2020-05-21 16:34:17 +0300
committerJohn Lo <loj@cisco.com>2020-06-08 14:17:59 +0000
commit698eb87a8eed847fe555ef327bcc99a4467ed59a (patch)
tree102c29c5d96a23a3e185e4d2d765f763445e8076 /src/vnet/pg/input.c
parente3621518046ad7f37ccf77c549a93375ab89da19 (diff)
vxlan: Fixed checksum calculation offset
VXLAN uses csum_offload for IPv6 packets, but without the GSO node the checksum is calculated only for the inner packet. This patch adds support for outer-header checksum calculation. The checksum for the inner packet should be calculated before the interface-output node (for example, in the vxlan node). Type: fix Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com> Signed-off-by: Vladimir Isaev <visaev@netgate.com> Change-Id: Ica68429ede4426293769207cd83c791ebe72fe56
Diffstat (limited to 'src/vnet/pg/input.c')
-rw-r--r--src/vnet/pg/input.c29
1 file changed, 19 insertions, 10 deletions
diff --git a/src/vnet/pg/input.c b/src/vnet/pg/input.c
index 8ba3b361894..483add0b5f3 100644
--- a/src/vnet/pg/input.c
+++ b/src/vnet/pg/input.c
@@ -1530,10 +1530,9 @@ pg_input_trace (pg_main_t * pg,
}
static_always_inline void
-fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
- u32 packet_data_size)
+fill_buffer_offload_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
+ int gso_enabled, u32 gso_size)
{
-
for (int i = 0; i < n_buffers; i++)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, buffers[i]);
@@ -1586,16 +1585,21 @@ fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
}
+
if (l4_proto == IP_PROTOCOL_TCP)
{
- b0->flags |= (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_GSO);
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
- l4_hdr_sz = tcp_header_bytes (tcp);
tcp->checksum = 0;
- vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
- vnet_buffer2 (b0)->gso_size = packet_data_size;
+ if (gso_enabled)
+ {
+ b0->flags |= VNET_BUFFER_F_GSO;
+ l4_hdr_sz = tcp_header_bytes (tcp);
+ vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
+ vnet_buffer2 (b0)->gso_size = gso_size;
+ }
}
else if (l4_proto == IP_PROTOCOL_UDP)
{
@@ -1603,7 +1607,6 @@ fill_gso_buffer_flags (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
- vnet_buffer2 (b0)->gso_l4_hdr_sz = sizeof (*udp);
udp->checksum = 0;
}
}
@@ -1700,8 +1703,14 @@ pg_generate_packets (vlib_node_runtime_t * node,
vnet_buffer (b)->feature_arc_index = feature_arc_index;
}
- if (pi->gso_enabled)
- fill_gso_buffer_flags (vm, to_next, n_this_frame, pi->gso_size);
+ if (pi->gso_enabled ||
+ (s->buffer_flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_IP_CKSUM)))
+ {
+ fill_buffer_offload_flags (vm, to_next, n_this_frame,
+ pi->gso_enabled, pi->gso_size);
+ }
n_trace = vlib_get_trace_count (vm, node);
if (n_trace > 0)