author    Srikanth A <srakula@cisco.com>    2019-10-02 17:48:58 -0700
committer John Lo <loj@cisco.com>           2019-10-10 16:37:40 +0000
commit    02833ff3294f4abbd8e3d52b38446e0f8f533ffc (patch)
tree      e2e9de14d3939938e502347723006f7bb374a02d /src/vnet/ip/ip.h
parent    8a047ed741072bdb8d93b0841473eae06ae3c9d0 (diff)
tcp: custom checksum calculations for IPv4/IPv6
Type: feature

Based on the configuration, we can disable the checksum offload capability
and calculate the checksum while pushing the TCP and IP headers. This saves
some cycles when the VPP stack is used on legacy hardware devices.

Signed-off-by: Srikanth A <srakula@cisco.com>
Change-Id: Ic1b3fcf3040917e47ee65263694ebf7437ac5668
(cherry picked from commit 3642782a2748503f5b5ccf89d1575c1d489948ef)
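For illustration, a hedged sketch of how a caller might use the new helper when
TCP checksum offload is disabled: seed the checksum with the IPv4 pseudo-header,
then let ip_calculate_l4_checksum() fold the TCP header and payload, walking the
buffer chain if present. The function name tcp_checksum_sw_ip4 and the
pseudo-header seeding are illustrative assumptions, not part of this patch.

/* Hypothetical usage sketch -- assumptions only, not part of this change. */
#include <vnet/ip/ip.h>

static u16
tcp_checksum_sw_ip4 (vlib_main_t * vm, vlib_buffer_t * b0, ip4_header_t * ip0)
{
  ip_csum_t sum0;
  u32 ip_header_size = ip4_header_bytes (ip0);
  u32 payload_length = clib_net_to_host_u16 (ip0->length) - ip_header_size;

  /* Seed with the IPv4 pseudo-header: protocol, L4 length, then addresses. */
  sum0 = clib_host_to_net_u32 (payload_length + (ip0->protocol << 16));
  sum0 = ip_csum_with_carry (sum0, ip0->src_address.as_u32);
  sum0 = ip_csum_with_carry (sum0, ip0->dst_address.as_u32);

  /* Fold the TCP header + payload; the helper follows chained buffers. */
  return ip_calculate_l4_checksum (vm, b0, sum0, payload_length,
                                   (u8 *) ip0, ip_header_size, /* l4h */ 0);
}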
Diffstat (limited to 'src/vnet/ip/ip.h')
-rw-r--r--  src/vnet/ip/ip.h  79
1 file changed, 79 insertions(+), 0 deletions(-)
diff --git a/src/vnet/ip/ip.h b/src/vnet/ip/ip.h
index 7a82dcf8f2b..65ccaef40c2 100644
--- a/src/vnet/ip/ip.h
+++ b/src/vnet/ip/ip.h
@@ -195,6 +195,85 @@ ip_incremental_checksum_buffer (vlib_main_t * vm,
return sum;
}
+always_inline u16
+ip_calculate_l4_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
+ ip_csum_t sum0, u32 payload_length,
+ u8 * iph, u32 ip_header_size, u8 * l4h)
+{
+ u16 sum16;
+ u8 *data_this_buffer, length_odd;
+ u32 n_bytes_left, n_this_buffer, n_ip_bytes_this_buffer;
+
+ n_bytes_left = payload_length;
+
+ if (l4h) /* packet l4 header and no buffer chain involved */
+ {
+ ASSERT (p0 == NULL);
+ n_this_buffer = payload_length;
+ data_this_buffer = l4h;
+ }
+ else
+ {
+ ASSERT (p0);
+ if (iph) /* ip header pointer set to packet in buffer */
+ {
+ ASSERT (ip_header_size);
+ n_this_buffer = payload_length;
+ data_this_buffer = iph + ip_header_size; /* at l4 header */
+ n_ip_bytes_this_buffer =
+ p0->current_length - (((u8 *) iph - p0->data) - p0->current_data);
+ if (PREDICT_FALSE (payload_length + ip_header_size >
+ n_ip_bytes_this_buffer))
+ {
+ n_this_buffer = n_ip_bytes_this_buffer - ip_header_size;
+ if (PREDICT_FALSE (n_this_buffer >> 31))
+ { /* error - ip header doesn't fit in this buffer */
+ ASSERT (0);
+ return 0xfefe;
+ }
+ }
+ }
+ else /* packet in buffer with no ip header */
+ { /* buffer current pointer at l4 header */
+ n_this_buffer = p0->current_length;
+ data_this_buffer = vlib_buffer_get_current (p0);
+ }
+ n_this_buffer = clib_min (n_this_buffer, n_bytes_left);
+ }
+
+ while (1)
+ {
+ sum0 = ip_incremental_checksum (sum0, data_this_buffer, n_this_buffer);
+ n_bytes_left -= n_this_buffer;
+ if (n_bytes_left == 0)
+ break;
+
+ if (!(p0->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ ASSERT (0); /* error - more buffers expected in chain */
+ return 0xfefe;
+ }
+
+ length_odd = (n_this_buffer & 1);
+
+ p0 = vlib_get_buffer (vm, p0->next_buffer);
+ data_this_buffer = vlib_buffer_get_current (p0);
+ n_this_buffer = clib_min (p0->current_length, n_bytes_left);
+
+ if (PREDICT_FALSE (length_odd))
+ {
+ /* Prepend a 0 byte to maintain 2-byte checksum alignment */
+ data_this_buffer--;
+ n_this_buffer++;
+ n_bytes_left++;
+ data_this_buffer[0] = 0;
+ }
+ }
+
+ sum16 = ~ip_csum_fold (sum0);
+ return sum16;
+}
+
void ip_del_all_interface_addresses (vlib_main_t * vm, u32 sw_if_index);
extern vlib_node_registration_t ip4_inacl_node;
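For illustration of the zero-byte prepend in the loop above: when a chained
buffer ends on an odd length, its last byte is summed as the high half of a
16-bit word, so the next buffer's first byte must land in the low half. A
small standalone sketch in plain C (assumptions only, not VPP code) showing
that the compensated partial sums equal the sum over the contiguous data:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sum data as big-endian 16-bit words; a trailing odd byte fills the high
 * half of its word, matching how a fragment boundary splits the stream. */
static uint32_t
sum16 (uint32_t sum, const uint8_t * data, size_t len)
{
  size_t i;
  for (i = 0; i + 1 < len; i += 2)
    sum += (uint32_t) data[i] << 8 | data[i + 1];
  if (len & 1)
    sum += (uint32_t) data[len - 1] << 8;
  return sum;
}

int
main (void)
{
  uint8_t pkt[7] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
  uint8_t frag2[5];

  /* Split at an odd offset: frag1 = pkt[0..2], frag2 = pkt[3..6], and
   * prepend a zero byte to frag2 as the loop above does. */
  frag2[0] = 0;
  memcpy (frag2 + 1, pkt + 3, 4);

  uint32_t whole = sum16 (0, pkt, 7);
  uint32_t split = sum16 (sum16 (0, pkt, 3), frag2, 5);
  assert (whole == split);	/* partial sums equal the contiguous sum */
  return 0;
}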