;;; Copyright (c) 2016 Cisco and/or its affiliates.
;;; Licensed under the Apache License, Version 2.0 (the "License");
;;; you may not use this file except in compliance with the License.
;;; You may obtain a copy of the License at:
;;;
;;;     http://www.apache.org/licenses/LICENSE-2.0
;;;
;;; Unless required by applicable law or agreed to in writing, software
;;; distributed under the License is distributed on an "AS IS" BASIS,
;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;;; See the License for the specific language governing permissions and
;;; limitations under the License.

;;; pipe-skel.el - pipelined graph node skeleton

(require 'skeleton)

(define-skeleton skel-pipeline-node
"Insert a skeleton pipelined graph node"
nil
'(setq node-name (skeleton-read "Node Name: "))
'(setq uc-node-name (upcase node-name))
'(setq nstages (skeleton-read "Number of pipeline stages: "))
"
#include <vlib/vlib.h>
#include <vppinfra/error.h>

/*
 * Dump these counters via the \"show error\" CLI command 
 * FIXME: Add packet counter / error strings as desired
 */

#define foreach_" node-name "_error \\
_(ERROR1, \"sample counter/ error string\")

static char * " node-name "_error_strings[] = {
#define _(sym,string) string,
  foreach_" node-name "_error
#undef _
};

/*
 * packet error / counter enumeration
 *
 * To count and drop a vlib_buffer_t *b:
 *
 *     Set b->error = node->errors[" uc-node-name "_ERROR_xxx];
 *     last_stage returns a disposition index bound to \"error-drop\"
 * 
 * To manually increment the specific counter " uc-node-name "_ERROR_ERROR1:
 *
 *  vlib_node_t *n = vlib_get_node (vm, " node-name "_node.index);
 *  u32 node_counter_base_index = n->error_heap_index;
 *  vlib_error_main_t * em = &vm->error_main;
 *  em->counters[node_counter_base_index + " uc-node-name "_ERROR_ERROR1] += 1;
 * 
 */

typedef enum {
#define _(sym,str) " uc-node-name "_ERROR_##sym,
    foreach_" node-name "_error
#undef _
    " uc-node-name "_N_ERROR,
} " node-name "_error_t;

/*
 * enumeration of per-packet dispositions
 * FIXME: add dispositions as desired
 */

typedef enum { \n"
"    " uc-node-name "_NEXT_NORMAL,\n"
"    " uc-node-name "_N_NEXT,
} " node-name "_next_t;

#define NSTAGES " nstages "

/* 
 * Use the generic buffer metadata + first line of packet data prefetch
 * stage function from <api/pipeline.h>. This is usually a Good Idea.
 */
#define stage0 generic_stage0

/* 
 * FIXME: add stage functions. Here is the function prototype:
 * 
 * static inline void stageN (vlib_main_t * vm,
 *                            vlib_node_runtime_t * node,
 *                            u32 buffer_index)
 */

/*
 * FIXME: the last pipeline stage returns the desired pkt next node index,
 * from the " node-name "_next_t enum above
 */
static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
                              u32 bi)
{
    vlib_buffer_t *b = vlib_get_buffer (vm, bi);

    b->error = node->errors[" uc-node-name "_ERROR_ERROR1];

    return " uc-node-name "_NEXT_NORMAL;
}

#include <api/pipeline.h>

static uword " node-name "_node_fn (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              vlib_frame_t * frame)
{
    return dispatch_pipeline (vm, node, frame);
}

VLIB_REGISTER_NODE (" node-name "_node) = {
  .function = " node-name "_node_fn,
  .name = \"" node-name "-node\",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  
  .n_errors = ARRAY_LEN(" node-name "_error_strings),
  .error_strings = " node-name "_error_strings,

  .n_next_nodes = " uc-node-name "_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
        [" uc-node-name "_NEXT_NORMAL] = \"error-drop\",
  },
};

/* 
 * packet generator definition to push superframes of data into the
 * new graph node. Cut and paste into <file>, then
 * \"exec <file>\", \"pa enable test\" at the QVNET prompt...
 * 
packet-generator new {
  name test
  limit 100
  node " node-name "-node
  size 374-374
  data { hex 0x02b46b96000100096978676265000500bf436973636f20494f5320536f6674776172652c2043333735304520536f66747761726520284333373530452d554e4956455253414c2d4d292c2056657273696f6e2031322e32283335295345352c2052454c4541534520534f4654574152452028666331290a436f707972696768742028632920313938362d3230303720627920436973636f2053797374656d732c20496e632e0a436f6d70696c6564205468752031392d4a756c2d30372031363a3137206279206e616368656e00060018636973636f2057532d4333373530452d3234544400020011000000010101cc0004000000000003001b54656e4769676162697445746865726e6574312f302f3100040008000000280008002400000c011200000000ffffffff010221ff000000000000001e7a50f000ff000000090004000a00060001000b0005010012000500001300050000160011000000010101cc000400000000001a00100000000100000000ffffffff }
}
 */
")
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef included_gro_func_h
#define included_gro_func_h

#include <vnet/ethernet/ethernet.h>
#include <vnet/gso/gro.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/udp/udp_packet.h>
#include <vnet/tcp/tcp.h>
#include <vnet/vnet.h>

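/*
 * A packet is not a GRO candidate if its first buffer carries no TCP
 * payload beyond the L2/L3/L4 headers, or if any TCP flag other than
 * ACK is set (SYN, FIN, RST, PSH, ...).
 */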
static_always_inline u8
gro_is_bad_packet (vlib_buffer_t * b, u8 flags, i16 l234_sz)
{
  if (((b->current_length - l234_sz) <= 0) || ((flags &= ~TCP_FLAG_ACK) != 0))
    return 1;
  return 0;
}

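/*
 * Build the GRO flow key (RX/TX interface, source/destination address,
 * TCP source/destination port) from an IPv4/TCP packet; the IPv6
 * variant below does the same for IPv6.
 */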
static_always_inline void
gro_get_ip4_flow_from_packet (u32 * sw_if_index,
			      ip4_header_t * ip4, tcp_header_t * tcp,
			      gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip4 (&flow_key->src_address, &ip4->src_address);
  ip46_address_set_ip4 (&flow_key->dst_address, &ip4->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

static_always_inline void
gro_get_ip6_flow_from_packet (u32 * sw_if_index,
			      ip6_header_t * ip6, tcp_header_t * tcp,
			      gro_flow_key_t * flow_key, int is_l2)
{
  flow_key->sw_if_index[VLIB_RX] = sw_if_index[VLIB_RX];
  flow_key->sw_if_index[VLIB_TX] = sw_if_index[VLIB_TX];
  ip46_address_set_ip6 (&flow_key->src_address, &ip6->src_address);
  ip46_address_set_ip6 (&flow_key->dst_address, &ip6->dst_address);
  flow_key->src_port = tcp->src_port;
  flow_key->dst_port = tcp->dst_port;
}

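/*
 * Classify a buffer as IPv4 or IPv6. The buffer flags are trusted when
 * set; otherwise the ethertype (skipping up to two VLAN tags) is used
 * on L2 paths and the IP version nibble on L3 paths. Returns the
 * matching VNET_BUFFER_F_IS_IP* flag, or 0 for anything else.
 */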
static_always_inline u32
gro_is_ip4_or_ip6_packet (vlib_buffer_t * b0, int is_l2)
{
  if (b0->flags & VNET_BUFFER_F_IS_IP4)
    return VNET_BUFFER_F_IS_IP4;
  if (b0->flags & VNET_BUFFER_F_IS_IP6)
    return VNET_BUFFER_F_IS_IP6;
  if (is_l2)
    {
      ethernet_header_t *eh =
	(ethernet_header_t *) vlib_buffer_get_current (b0);
      u16 ethertype = clib_net_to_host_u16 (eh->type);

      if (ethernet_frame_is_tagged (ethertype))
	{
	  ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);

	  ethertype = clib_net_to_host_u16 (vlan->type);
	  if (ethertype == ETHERNET_TYPE_VLAN)
	    {
	      vlan++;
	      ethertype = clib_net_to_host_u16 (vlan->type);
	    }
	}
      if (ethertype == ETHERNET_TYPE_IP4)
	return VNET_BUFFER_F_IS_IP4;
      if (ethertype == ETHERNET_TYPE_IP6)
	return VNET_BUFFER_F_IS_IP6;
    }
  else
    {
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x40)
	return VNET_BUFFER_F_IS_IP4;
      if ((((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0) == 0x60)
	return VNET_BUFFER_F_IS_IP6;
    }

  return 0;
}

typedef enum
{
  GRO_PACKET_ACTION_NONE = 0,
  GRO_PACKET_ACTION_ENQUEUE = 1,
  GRO_PACKET_ACTION_FLUSH = 2,
} gro_packet_action_t;

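/*
 * Decide what to do with tcp1 relative to the already-stored tcp0: if
 * tcp1's sequence number equals tcp0's sequence number plus
 * payload_len0, the packets are contiguous and tcp1 can be enqueued;
 * anything else forces a flush.
 */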
static_always_inline gro_packet_action_t
gro_tcp_sequence_check (tcp_header_t * tcp0, tcp_header_t * tcp1,
			u32 payload_len0)
{
  u32 next_tcp_seq0 = clib_net_to_host_u32 (tcp0->seq_number);
  u32 next_tcp_seq1 = clib_net_to_host_u32 (tcp1->seq_number);

  /* next packet, enqueue */
  if (PREDICT_TRUE (next_tcp_seq0 + payload_len0 == next_tcp_seq1))
    return GRO_PACKET_ACTION_ENQUEUE;
  /* flush all packets */
  else
    return GRO_PACKET_ACTION_FLUSH;
}

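/*
 * Append b1's TCP payload to b0: strip b1's L2/L3/L4 headers, chain b1
 * onto the tail of b0's buffer chain and account for the additional
 * payload in b0's total length.
 */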
static_always_inline void
gro_merge_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
		   vlib_buffer_t * b1, u32 bi1, u32 payload_len1,
		   u16 l234_sz1)
{
  vlib_buffer_t *pb = b0;

  if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
    b0->total_length_not_including_first_buffer = 0;

  while (pb->flags & VLIB_BUFFER_NEXT_PRESENT)
    pb = vlib_get_buffer (vm, pb->next_buffer);

  vlib_buffer_advance (b1, l234_sz1);
  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
  pb->next_buffer = bi1;
  b0->total_length_not_including_first_buffer += payload_len1;
  b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
}

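/*
 * Parse and validate a single packet for GRO: fill in the generic
 * header offsets and the flow key, and return the packet length in
 * chain. A return value of 0 means the packet cannot be coalesced
 * (not IPv4/IPv6 TCP, flags other than ACK, no payload, or already at
 * TCP_MAX_GSO_SZ).
 */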
static_always_inline u32
gro_get_packet_data (vlib_main_t * vm, vlib_buffer_t * b0,
		     generic_header_offset_t * gho0,
		     gro_flow_key_t * flow_key0, int is_l2)
{
  ip4_header_t *ip4_0 = 0;
  ip6_header_t *ip6_0 = 0;
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  u16 l234_sz0 = 0;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  if (PREDICT_FALSE ((gho0->gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0->l3_hdr_offset);
  tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0->l4_hdr_offset);

  l234_sz0 = gho0->hdr_sz;
  if (PREDICT_FALSE (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  if (gho0->gho_flags & GHO_F_IP4)
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, flow_key0,
				    is_l2);
    }
  else if (gho0->gho_flags & GHO_F_IP6)
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, flow_key0,
				    is_l2);
    }
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  if (PREDICT_FALSE (pkt_len0 >= TCP_MAX_GSO_SZ))
    return 0;

  return pkt_len0;
}

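/*
 * Opportunistically merge b1 into b0 without a flow table: both must
 * be plain TCP/ACK packets of the same flow, b1 must directly follow
 * b0 in sequence space, and the merged packet must stay below
 * TCP_MAX_GSO_SZ. Returns b1's ACK number (network byte order) on
 * success, 0 otherwise.
 */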
static_always_inline u32
gro_coalesce_buffers (vlib_main_t * vm, vlib_buffer_t * b0,
		      vlib_buffer_t * b1, u32 bi1, int is_l2)
{
  generic_header_offset_t gho0 = { 0 };
  generic_header_offset_t gho1 = { 0 };
  gro_flow_key_t flow_key0, flow_key1;
  ip4_header_t *ip4_0, *ip4_1;
  ip6_header_t *ip6_0, *ip6_1;
  tcp_header_t *tcp0, *tcp1;
  u16 l234_sz0, l234_sz1;
  u32 pkt_len0, pkt_len1, payload_len0, payload_len1;
  u32 sw_if_index0[VLIB_N_RX_TX] = { ~0 };
  u32 sw_if_index1[VLIB_N_RX_TX] = { ~0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);
  u32 is_ip1 = gro_is_ip4_or_ip6_packet (b1, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  if (is_ip1 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip1 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b1, &gho1, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );
  else
    return 0;

  pkt_len0 = vlib_buffer_length_in_chain (vm, b0);
  pkt_len1 = vlib_buffer_length_in_chain (vm, b1);

  if (((gho0.gho_flags & GHO_F_TCP) == 0)
      || ((gho1.gho_flags & GHO_F_TCP) == 0))
    return 0;

  ip4_0 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip4_1 =
    (ip4_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);
  ip6_0 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
  ip6_1 =
    (ip6_header_t *) (vlib_buffer_get_current (b1) + gho1.l3_hdr_offset);

  tcp0 = (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp1 = (tcp_header_t *) (vlib_buffer_get_current (b1) + gho1.l4_hdr_offset);

  l234_sz0 = gho0.hdr_sz;
  l234_sz1 = gho1.hdr_sz;

  if (gro_is_bad_packet (b0, tcp0->flags, l234_sz0)
      || gro_is_bad_packet (b1, tcp1->flags, l234_sz1))
    return 0;

  sw_if_index0[VLIB_RX] = vnet_buffer (b0)->sw_if_index[VLIB_RX];
  sw_if_index0[VLIB_TX] = vnet_buffer (b0)->sw_if_index[VLIB_TX];

  sw_if_index1[VLIB_RX] = vnet_buffer (b1)->sw_if_index[VLIB_RX];
  sw_if_index1[VLIB_TX] = vnet_buffer (b1)->sw_if_index[VLIB_TX];

  if ((gho0.gho_flags & GHO_F_IP4) && (gho1.gho_flags & GHO_F_IP4))
    {
      gro_get_ip4_flow_from_packet (sw_if_index0, ip4_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip4_flow_from_packet (sw_if_index1, ip4_1, tcp1, &flow_key1,
				    is_l2);
    }
  else if ((gho0.gho_flags & GHO_F_IP6) && (gho1.gho_flags & GHO_F_IP6))
    {
      gro_get_ip6_flow_from_packet (sw_if_index0, ip6_0, tcp0, &flow_key0,
				    is_l2);
      gro_get_ip6_flow_from_packet (sw_if_index1, ip6_1, tcp1, &flow_key1,
				    is_l2);
    }
  else
    return 0;

  if (gro_flow_is_equal (&flow_key0, &flow_key1) == 0)
    return 0;

  payload_len0 = pkt_len0 - l234_sz0;
  payload_len1 = pkt_len1 - l234_sz1;

  if (pkt_len0 >= TCP_MAX_GSO_SZ || pkt_len1 >= TCP_MAX_GSO_SZ
      || (pkt_len0 + payload_len1) >= TCP_MAX_GSO_SZ)
    return 0;

  if (gro_tcp_sequence_check (tcp0, tcp1, payload_len0) ==
      GRO_PACKET_ACTION_ENQUEUE)
    {
      gro_merge_buffers (vm, b0, b1, bi1, payload_len1, l234_sz1);
      return tcp1->ack_number;
    }

  return 0;
}

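/*
 * Turn a coalesced buffer chain back into a well-formed GSO packet:
 * derive gso_size from the first buffer's payload, rewrite the IPv4
 * total length or IPv6 payload length, request GSO and checksum
 * offloads, and store the most recent ACK number.
 */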
static_always_inline void
gro_fixup_header (vlib_main_t * vm, vlib_buffer_t * b0, u32 ack_number,
		  int is_l2)
{
  generic_header_offset_t gho0 = { 0 };

  u32 is_ip0 = gro_is_ip4_or_ip6_packet (b0, is_l2);

  if (is_ip0 & VNET_BUFFER_F_IS_IP4)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 1 /* is_ip4 */ ,
				       0 /* is_ip6 */ );
  else if (is_ip0 & VNET_BUFFER_F_IS_IP6)
    vnet_generic_header_offset_parser (b0, &gho0, is_l2, 0 /* is_ip4 */ ,
				       1 /* is_ip6 */ );

  vnet_buffer2 (b0)->gso_size = b0->current_length - gho0.hdr_sz;

  if (gho0.gho_flags & GHO_F_IP4)
    {
      ip4_header_t *ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip4->length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l3_hdr_offset);
      b0->flags |=
	(VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4 |
	 VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
    }
  else if (gho0.gho_flags & GHO_F_IP6)
    {
      ip6_header_t *ip6 =
	(ip6_header_t *) (vlib_buffer_get_current (b0) + gho0.l3_hdr_offset);
      ip6->payload_length =
	clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
			      gho0.l4_hdr_offset);
      b0->flags |=
	(VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6 |
	 VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
    }

  tcp_header_t *tcp0 =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
  tcp0->ack_number = ack_number;
  b0->flags &= ~VLIB_BUFFER_IS_TRACED;
}

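/*
 * Flush every flow whose hold timer has expired: fix up the stored
 * packet, write its buffer index to 'to' and reset the flow. Returns
 * the number of buffer indices written.
 */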
static_always_inline u32
vnet_gro_flow_table_flush (vlib_main_t * vm, gro_flow_table_t * flow_table,
			   u32 * to)
{
  if (flow_table->flow_table_size > 0)
    {
      gro_flow_t *gro_flow;
      u32 i = 0, j = 0;
      while (i < GRO_FLOW_TABLE_MAX_SIZE)
	{
	  gro_flow = &flow_table->gro_flow[i];
	  if (gro_flow->n_buffers && gro_flow_is_timeout (vm, gro_flow))
	    {
	      // flush the packet
	      vlib_buffer_t *b0 =
		vlib_get_buffer (vm, gro_flow->buffer_index);
	      gro_fixup_header (vm, b0, gro_flow->last_ack_number,
				flow_table->is_l2);
	      to[j] = gro_flow->buffer_index;
	      gro_flow_table_reset_flow (flow_table, gro_flow);
	      flow_table->n_vectors++;
	      j++;
	    }
	  i++;
	}

      return j;
    }
  return 0;
}

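/*
 * Called periodically from the dispatcher: when the table-level timer
 * expires, flush timed-out flows, hand the resulting buffers to the
 * flow table's output node in a fresh frame, and re-arm the timer.
 */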
static_always_inline void
vnet_gro_flow_table_schedule_node_on_dispatcher (vlib_main_t * vm,
						 gro_flow_table_t *
						 flow_table)
{
  if (gro_flow_table_is_timeout (vm, flow_table))
    {
      u32 to[GRO_FLOW_TABLE_MAX_SIZE] = { 0 };
      u32 n_to = vnet_gro_flow_table_flush (vm, flow_table, to);

      if (n_to > 0)
	{
	  u32 node_index = flow_table->node_index;
	  vlib_frame_t *f = vlib_get_frame_to_node (vm, node_index);
	  u32 *f_to = vlib_frame_vector_args (f);
	  u32 i = 0;

	  while (i < n_to)
	    {
	      f_to[f->n_vectors] = to[i];
	      i++;
	      f->n_vectors++;
	    }
	  vlib_put_frame_to_node (vm, node_index, f);
	}
      gro_flow_table_set_timeout (vm, flow_table, GRO_FLOW_TABLE_FLUSH);
    }
}

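/*
 * Per-packet flow-table GRO. Packets that cannot be coalesced (table
 * disabled, already GSO, not plain TCP/ACK, no free flow) pass straight
 * through. The first packet of a flow is parked in the table; an
 * in-sequence follow-up is merged into it; an out-of-sequence or
 * oversized packet forces the stored packet out. Returns the number of
 * buffer indices written to 'to' (0, 1 or 2).
 */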
static_always_inline u32
vnet_gro_flow_table_inline (vlib_main_t * vm, gro_flow_table_t * flow_table,
			    u32 bi0, u32 * to)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  generic_header_offset_t gho0 = { 0 };
  gro_flow_t *gro_flow = 0;
  gro_flow_key_t flow_key0 = { };
  tcp_header_t *tcp0 = 0;
  u32 pkt_len0 = 0;
  int is_l2 = flow_table->is_l2;

  if (!gro_flow_table_is_enable (flow_table))
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_GSO))
    {
      to[0] = bi0;
      return 1;
    }

  pkt_len0 = gro_get_packet_data (vm, b0, &gho0, &flow_key0, is_l2);
  if (pkt_len0 == 0)
    {
      to[0] = bi0;
      return 1;
    }

  gro_flow = gro_flow_table_find_or_add_flow (flow_table, &flow_key0);
  if (!gro_flow)
    {
      to[0] = bi0;
      return 1;
    }

  if (PREDICT_FALSE (gro_flow->n_buffers == 0))
    {
      flow_table->total_vectors++;
      gro_flow_store_packet (gro_flow, bi0);
      tcp0 =
	(tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      gro_flow->last_ack_number = tcp0->ack_number;
      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
      return 0;
    }
  else
    {
      tcp0 =
	(tcp_header_t *) (vlib_buffer_get_current (b0) + gho0.l4_hdr_offset);
      generic_header_offset_t gho_s = { 0 };
      tcp_header_t *tcp_s;
      u16 l234_sz0, l234_sz_s;
      u32 pkt_len_s, payload_len0, payload_len_s;
      u32 bi_s = gro_flow->buffer_index;

      vlib_buffer_t *b_s = vlib_get_buffer (vm, bi_s);
      u32 is_ip_s = gro_is_ip4_or_ip6_packet (b_s, is_l2);
      if (is_ip_s & VNET_BUFFER_F_IS_IP4)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   1 /* is_ip4 */ , 0 /* is_ip6 */ );
      else if (is_ip_s & VNET_BUFFER_F_IS_IP6)
	vnet_generic_header_offset_parser (b_s, &gho_s, is_l2,
					   0 /* is_ip4 */ , 1 /* is_ip6 */ );

      tcp_s =
	(tcp_header_t *) (vlib_buffer_get_current (b_s) +
			  gho_s.l4_hdr_offset);
      pkt_len_s = vlib_buffer_length_in_chain (vm, b_s);
      l234_sz0 = gho0.hdr_sz;
      l234_sz_s = gho_s.hdr_sz;
      payload_len0 = pkt_len0 - l234_sz0;
      payload_len_s = pkt_len_s - l234_sz_s;
      gro_packet_action_t action =
	gro_tcp_sequence_check (tcp_s, tcp0, payload_len_s);

      if (PREDICT_TRUE (action == GRO_PACKET_ACTION_ENQUEUE))
	{
	  if (PREDICT_TRUE ((pkt_len_s + payload_len0) < TCP_MAX_GSO_SZ))
	    {
	      flow_table->total_vectors++;
	      gro_merge_buffers (vm, b_s, b0, bi0, payload_len0, l234_sz0);
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      return 0;
	    }
	  else
	    {
	      // flush the stored GSO size packet and buffer the current packet
	      flow_table->n_vectors++;
	      flow_table->total_vectors++;
	      gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	      gro_flow->n_buffers = 0;
	      gro_flow_store_packet (gro_flow, bi0);
	      gro_flow->last_ack_number = tcp0->ack_number;
	      gro_flow_set_timeout (vm, gro_flow, GRO_FLOW_TIMEOUT);
	      to[0] = bi_s;
	      return 1;
	    }
	}
      else
	{
	  // flush the all (current and stored) packets
	  flow_table->n_vectors++;
	  flow_table->total_vectors++;
	  gro_fixup_header (vm, b_s, gro_flow->last_ack_number, is_l2);
	  gro_flow->n_buffers = 0;
	  gro_flow_table_reset_flow (flow_table, gro_flow);
	  to[0] = bi_s;
	  to[1] = bi0;
	  return 2;
	}
    }
}

/**
 * coalesce buffers with flow tables; returns the number of buffer
 * indices written to 'to'
 */
static_always_inline u32
vnet_gro_inline (vlib_main_t * vm, gro_flow_table_t * flow_table, u32 * from,
		 u16 n_left_from, u32 * to)
{
  u16 count = 0, i = 0;

  for (i = 0; i < n_left_from; i++)
    count += vnet_gro_flow_table_inline (vm, flow_table, from[i], &to[count]);

  return count;
}

/**
 * coalesce buffers in an opportunistic way without flow tables; returns
 * the number of leading input buffers coalesced into from[0], counting
 * from[0] itself (at least 1)
 */
static_always_inline u32
vnet_gro_simple_inline (vlib_main_t * vm, u32 * from, u16 n_left_from,
			int is_l2)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, b, n_left_from);
  u32 bi = 1, ack_number = 0;
  if (PREDICT_TRUE (((b[0]->flags & VNET_BUFFER_F_GSO) == 0)))
    {
      while (n_left_from > 1)
	{
	  if (PREDICT_TRUE (((b[bi]->flags & VNET_BUFFER_F_GSO) == 0)))
	    {
	      u32 ret;
	      if ((ret =
		   gro_coalesce_buffers (vm, b[0], b[bi], from[bi],
					 is_l2)) != 0)
		{
		  n_left_from -= 1;
		  bi += 1;
		  ack_number = ret;
		  continue;
		}
	      else
		break;
	    }
	  else
	    break;
	}

      if (bi >= 2)
	{
	  gro_fixup_header (vm, b[0], ack_number, is_l2);
	}
    }
  return bi;
}
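
/*
 * Illustrative usage sketch only, with hypothetical names (everything
 * prefixed "example_"): how a caller might drive the two coalescing
 * entry points above, depending on whether a flow table is available.
 */
static_always_inline void
example_gro_usage_sketch (vlib_main_t * vm, gro_flow_table_t * flow_table,
			  u32 * from, u16 n_left_from, int is_l2)
{
  if (flow_table)
    {
      /* Flow-table mode: packets may be parked in the table and released
       * later by the timeout flush. 'to' receives the buffer indices that
       * are ready to continue now; each input can yield up to two outputs,
       * hence the 2x sizing. */
      u32 to[2 * VLIB_FRAME_SIZE];
      u32 n_to = vnet_gro_inline (vm, flow_table, from, n_left_from, to);
      /* ...enqueue to[0 .. n_to - 1] to the next node here... */
      (void) n_to;
    }
  else
    {
      /* Opportunistic mode: consecutive in-sequence packets of the same
       * flow are merged into from[0]; n_coalesced counts the input buffers
       * consumed, including from[0] itself. */
      u32 n_coalesced = vnet_gro_simple_inline (vm, from, n_left_from, is_l2);
      /* ...send from[0] as one GSO packet, then continue at from[n_coalesced]... */
      (void) n_coalesced;
    }
}
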
#endif /* included_gro_func_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */