author    | Florin Coras <fcoras@cisco.com> | 2017-07-31 17:18:03 -0700
committer | Florin Coras <fcoras@cisco.com> | 2017-08-02 01:49:39 -0700
commit    | 66b11318a1e5f24880e3ec77c95d70647732a4a8 (patch)
tree      | 5711f1b28863d0a2130f6370f39c7777ea34b07e /src/vnet/tcp
parent    | fdbc38249a8c672937a74667dcfaafa2cfd292e7 (diff)
Fix tcp tx buffer allocation
- Make tcp output buffer allocation macro an inline function
- Use per ip version per thread tx frames for retransmits and timer events
- Fix / parameterize tcp data structure preallocation
- Add a couple of gdb-callable show commands
- Fix local endpoint cleanup
Change-Id: I67b47b7570aa14cb4634b6fd93c57cd2eacbfa29
Signed-off-by: Florin Coras <fcoras@cisco.com>
Signed-off-by: Dave Barach <dave@barachs.net>
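
The second bullet in the message is the heart of the change: instead of allocating and submitting a one-packet frame for every retransmit or timer-driven send, tcp_enqueue_to_output_i() now appends buffer indices to a per-IP-version, per-thread frame (tm->tx_frames[!is_ip4][thread_index]) and only hands the frame to the ip4/ip6 output node once it fills up to VLIB_FRAME_SIZE or is explicitly flushed from tcp_update_time() and the established input node. The snippet below is a minimal standalone model of that batching pattern, not VPP code: FRAME_SIZE, frame_cache, flush() and enqueue_to_output() are illustrative stand-ins for VLIB_FRAME_SIZE, tm->tx_frames, vlib_put_frame_to_node() and tcp_enqueue_to_output_i().

```c
#include <stdio.h>
#include <stdint.h>

#define FRAME_SIZE 4		/* stand-in for VLIB_FRAME_SIZE */
#define N_THREADS  2

/* One pending frame per (ip-version, thread), like tm->tx_frames[!is_ip4][thread] */
typedef struct
{
  uint32_t buffers[FRAME_SIZE];
  uint32_t n_vectors;
} frame_t;

static frame_t frame_cache[2][N_THREADS];

/* Stand-in for vlib_put_frame_to_node(): hand the batch to the output node */
static void
flush (int is_ip4, int thread)
{
  frame_t *f = &frame_cache[!is_ip4][thread];
  if (f->n_vectors == 0)
    return;
  printf ("thread %d: flushing %u ip%c buffers\n", thread, f->n_vectors,
	  is_ip4 ? '4' : '6');
  f->n_vectors = 0;
}

/* Mirrors tcp_enqueue_to_output_i(): append, flush when full or when forced */
static void
enqueue_to_output (uint32_t bi, int is_ip4, int thread, int flush_now)
{
  frame_t *f = &frame_cache[!is_ip4][thread];
  f->buffers[f->n_vectors++] = bi;
  if (flush_now || f->n_vectors == FRAME_SIZE)
    flush (is_ip4, thread);
}

int
main (void)
{
  /* Retransmits on thread 1 accumulate into the cached frame ... */
  for (uint32_t bi = 0; bi < 6; bi++)
    enqueue_to_output (bi, 1 /* is_ip4 */, 1 /* thread */, 0);
  /* ... and the periodic timer update flushes whatever is left, as
     tcp_update_time() -> tcp_flush_frames_to_output() does. */
  flush (1, 1);
  return 0;
}
```

The old path called vlib_get_frame_to_node() and vlib_put_frame_to_node() once per packet (f->n_vectors = 1), so the batching mostly benefits the retransmit and timer-pop paths that generate many small sends in a burst.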
Diffstat (limited to 'src/vnet/tcp')
-rw-r--r-- | src/vnet/tcp/builtin_client.c |  39
-rw-r--r-- | src/vnet/tcp/tcp.c            |  52
-rw-r--r-- | src/vnet/tcp/tcp.h            |  12
-rw-r--r-- | src/vnet/tcp/tcp_input.c      |   2
-rw-r--r-- | src/vnet/tcp/tcp_output.c     | 188
5 files changed, 203 insertions, 90 deletions
```diff
diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c
index 27e20f8e8e5..48daffb41de 100644
--- a/src/vnet/tcp/builtin_client.c
+++ b/src/vnet/tcp/builtin_client.c
@@ -597,8 +597,9 @@ clients_connect (vlib_main_t * vm, u8 * uri, u32 n_clients)
       a->mp = 0;
       vnet_connect_uri (a);
 
-      /* Crude pacing for call setups, 100k/sec */
-      vlib_process_suspend (vm, 10e-6);
+      /* Crude pacing for call setups */
+      if ((i % 4) == 0)
+        vlib_process_suspend (vm, 10e-6);
     }
 }
 
@@ -612,8 +613,10 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
   uword *event_data = 0, event_type;
   u8 *default_connect_uri = (u8 *) "tcp://6.0.1.1/1234", *uri;
   u64 tmp, total_bytes;
-  f64 cli_timeout = 20.0, delta;
+  f64 test_timeout = 20.0, syn_timeout = 20.0, delta;
+  f64 time_before_connects;
   u32 n_clients = 1;
+  int preallocate_sessions = 0;
   char *transfer_type;
   int i;
 
@@ -640,7 +643,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
        ;
       else if (unformat (input, "uri %s", &tm->connect_uri))
        ;
-      else if (unformat (input, "cli-timeout %f", &cli_timeout))
+      else if (unformat (input, "test-timeout %f", &test_timeout))
+       ;
+      else if (unformat (input, "syn-timeout %f", &syn_timeout))
        ;
       else if (unformat (input, "no-return"))
        tm->no_return = 1;
@@ -657,6 +662,8 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
        tm->private_segment_size = tmp;
       else if (unformat (input, "preallocate-fifos"))
        tm->prealloc_fifos = 1;
+      else if (unformat (input, "preallocate-sessions"))
+       preallocate_sessions = 1;
       else if (unformat (input, "client-batch %d",
                         &tm->connections_per_batch))
        ;
@@ -674,6 +681,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
          return clib_error_return (0, "failed init");
        }
 
+  tm->ready_connections = 0;
   tm->expected_connections = n_clients;
   tm->rx_total = 0;
 
@@ -705,11 +713,21 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
     vlib_node_set_state (vlib_mains[i], builtin_client_node.index,
                         VLIB_NODE_STATE_POLLING);
 
+  if (preallocate_sessions)
+    {
+      session_t *sp __attribute__ ((unused));
+      for (i = 0; i < n_clients; i++)
+       pool_get (tm->sessions, sp);
+      for (i = 0; i < n_clients; i++)
+       pool_put_index (tm->sessions, i);
+    }
+
   /* Fire off connect requests */
+  time_before_connects = vlib_time_now (vm);
   clients_connect (vm, uri, n_clients);
 
   /* Park until the sessions come up, or ten seconds elapse... */
-  vlib_process_wait_for_event_or_clock (vm, 10 /* timeout, seconds */ );
+  vlib_process_wait_for_event_or_clock (vm, syn_timeout);
   event_type = vlib_process_get_events (vm, &event_data);
   switch (event_type)
     {
@@ -719,6 +737,15 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
       goto cleanup;
 
     case 1:
+      delta = vlib_time_now (vm) - time_before_connects;
+
+      if (delta != 0.0)
+       {
+         vlib_cli_output
+           (vm, "%d three-way handshakes in %.2f seconds, %.2f/sec",
+            n_clients, delta, ((f64) n_clients) / delta);
+       }
+
       tm->test_start_time = vlib_time_now (tm->vlib_main);
       vlib_cli_output (vm, "Test started at %.6f", tm->test_start_time);
       break;
@@ -729,7 +756,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm,
     }
 
   /* Now wait for the sessions to finish... */
-  vlib_process_wait_for_event_or_clock (vm, cli_timeout);
+  vlib_process_wait_for_event_or_clock (vm, test_timeout);
   event_type = vlib_process_get_events (vm, &event_data);
   switch (event_type)
     {
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index 59b20747da6..8e2eb9f4e3d 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -173,7 +173,7 @@ tcp_connection_cleanup (tcp_connection_t * tc)
 
   /* Cleanup local endpoint if this was an active connect */
   tepi = transport_endpoint_lookup (&tm->local_endpoints_table, &tc->c_lcl_ip,
-                                   tc->c_lcl_port);
+                                   clib_net_to_host_u16 (tc->c_lcl_port));
   if (tepi != TRANSPORT_ENDPOINT_INVALID_INDEX)
     {
       tep = pool_elt_at_index (tm->local_endpoints, tepi);
@@ -367,25 +367,24 @@ tcp_allocate_local_port (ip46_address_t * ip)
 {
   tcp_main_t *tm = vnet_get_tcp_main ();
   transport_endpoint_t *tep;
-  u32 time_now, tei;
+  u32 tei;
   u16 min = 1024, max = 65535; /* XXX configurable ? */
-  int tries;
+  int tries, limit;
 
-  tries = max - min;
-  time_now = tcp_time_now ();
+  limit = max - min;
 
   /* Only support active opens from thread 0 */
   ASSERT (vlib_get_thread_index () == 0);
 
   /* Search for first free slot */
-  for (; tries >= 0; tries--)
+  for (tries = 0; tries < limit; tries++)
     {
       u16 port = 0;
 
       /* Find a port in the specified range */
       while (1)
        {
-         port = random_u32 (&time_now) & PORT_MASK;
+         port = random_u32 (&tm->port_allocator_seed) & PORT_MASK;
          if (PREDICT_TRUE (port >= min && port < max))
            break;
        }
@@ -1189,8 +1188,9 @@ tcp_main_enable (vlib_main_t * vm)
   vlib_thread_main_t *vtm = vlib_get_thread_main ();
   clib_error_t *error = 0;
   u32 num_threads;
-  int thread, i;
+  int i, thread;
   tcp_connection_t *tc __attribute__ ((unused));
+  u32 preallocated_connections_per_thread;
 
   if ((error = vlib_call_init_function (vm, ip_main_init)))
     return error;
@@ -1224,14 +1224,26 @@ tcp_main_enable (vlib_main_t * vm)
   vec_validate (tm->connections, num_threads - 1);
 
   /*
-   * Preallocate connections
+   * Preallocate connections. Assume that thread 0 won't
+   * use preallocated threads when running multi-core
    */
-  for (thread = 0; thread < num_threads; thread++)
+  if (num_threads == 1)
     {
-      for (i = 0; i < tm->preallocated_connections; i++)
+      thread = 0;
+      preallocated_connections_per_thread = tm->preallocated_connections;
+    }
+  else
+    {
+      thread = 1;
+      preallocated_connections_per_thread =
+       tm->preallocated_connections / (num_threads - 1);
+    }
+  for (; thread < num_threads; thread++)
+    {
+      for (i = 0; i < preallocated_connections_per_thread; i++)
        pool_get (tm->connections[thread], tc);
-      for (i = 0; i < tm->preallocated_connections; i++)
+      for (i = 0; i < preallocated_connections_per_thread; i++)
        pool_put_index (tm->connections[thread], i);
     }
 
@@ -1257,13 +1269,21 @@ tcp_main_enable (vlib_main_t * vm)
     / TCP_TSTAMP_RESOLUTION;
 
   clib_bihash_init_24_8 (&tm->local_endpoints_table, "local endpoint table",
-                        200000 /* $$$$ config parameter nbuckets */ ,
-                        (64 << 20) /*$$$ config parameter table size */ );
+                        1000000 /* $$$$ config parameter nbuckets */ ,
+                        (512 << 20) /*$$$ config parameter table size */ );
+
+  /* Initialize [port-allocator] random number seed */
+  tm->port_allocator_seed = (u32) clib_cpu_time_now ();
+
   if (num_threads > 1)
     {
      clib_spinlock_init (&tm->half_open_lock);
      clib_spinlock_init (&tm->local_endpoints_lock);
    }
+
+  vec_validate (tm->tx_frames[0], num_threads - 1);
+  vec_validate (tm->tx_frames[1], num_threads - 1);
+
   return error;
 }
 
@@ -1289,16 +1309,12 @@ clib_error_t *
 tcp_init (vlib_main_t * vm)
 {
   tcp_main_t *tm = vnet_get_tcp_main ();
-
-  tm->vnet_main = vnet_get_main ();
   tm->is_enabled = 0;
-
   return 0;
 }
 
 VLIB_INIT_FUNCTION (tcp_init);
-
 static clib_error_t *
 tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
 {
diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index 4fa681f8cc1..997df76f545 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -369,6 +369,8 @@ typedef struct _tcp_main
 
   /** per-worker tx buffer free lists */
   u32 **tx_buffers;
+  /** per-worker tx frames to 4/6 output nodes */
+  vlib_frame_t **tx_frames[2];
 
   /* Per worker-thread timer wheel for connections timers */
   tw_timer_wheel_16t_2w_512sl_t *timer_wheels;
@@ -400,11 +402,8 @@ typedef struct _tcp_main
   u32 last_v6_address_rotor;
   ip6_address_t *ip6_src_addresses;
 
-  /* convenience */
-  vlib_main_t *vlib_main;
-  vnet_main_t *vnet_main;
-  ip4_main_t *ip4_main;
-  ip6_main_t *ip6_main;
+  /** Port allocator random number generator seed */
+  u32 port_allocator_seed;
 } tcp_main_t;
 
 extern tcp_main_t tcp_main;
@@ -493,6 +492,8 @@ void tcp_send_fin (tcp_connection_t * tc);
 void tcp_init_mss (tcp_connection_t * tc);
 void tcp_update_snd_mss (tcp_connection_t * tc);
 void tcp_update_rto (tcp_connection_t * tc);
+void tcp_flush_frame_to_output (vlib_main_t * vm, u8 thread_index, u8 is_ip4);
+void tcp_flush_frames_to_output (u8 thread_index);
 
 always_inline u32
 tcp_end_seq (tcp_header_t * th, u32 len)
@@ -614,6 +615,7 @@ tcp_update_time (f64 now, u32 thread_index)
 {
   tw_timer_expire_timers_16t_2w_512sl (&tcp_main.timer_wheels[thread_index],
                                       now);
+  tcp_flush_frames_to_output (thread_index);
 }
 
 u32 tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b);
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 6c59d70f072..29f4f08d72c 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -1751,6 +1751,8 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   errors = session_manager_flush_enqueue_events (my_thread_index);
   tcp_established_inc_counter (vm, is_ip4, TCP_ERROR_EVENT_FIFO_FULL,
                              errors);
+  tcp_flush_frame_to_output (vm, my_thread_index, is_ip4);
+
   return from_frame->n_vectors;
 }
 
diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c
index ad13493a14c..f8fbb8a9e69 100644
--- a/src/vnet/tcp/tcp_output.c
+++ b/src/vnet/tcp/tcp_output.c
@@ -436,34 +436,41 @@ tcp_init_mss (tcp_connection_t * tc)
     tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
 }
 
-#define tcp_get_free_buffer_index(tm, bidx)                             \
-do {                                                                    \
-  u32 *my_tx_buffers, n_free_buffers;                                   \
-  u32 thread_index = vlib_get_thread_index();                           \
-  my_tx_buffers = tm->tx_buffers[thread_index];                         \
-  if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0))                      \
-    {                                                                   \
-      n_free_buffers = 32;      /* TODO config or macro */              \
-      vec_validate (my_tx_buffers, n_free_buffers - 1);                 \
-      _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list (      \
-          vlib_get_main(), my_tx_buffers, n_free_buffers,               \
-          VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);                         \
-      tm->tx_buffers[thread_index] = my_tx_buffers;                     \
-    }                                                                   \
-  /* buffer shortage */                                                 \
-  if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0))                     \
-    return;                                                             \
-  *bidx = my_tx_buffers[_vec_len (my_tx_buffers)-1];                    \
-  _vec_len (my_tx_buffers) -= 1;                                        \
-} while (0)
-
-#define tcp_return_buffer(tm)                                           \
-do {                                                                    \
-  u32 *my_tx_buffers;                                                   \
-  u32 thread_index = vlib_get_thread_index();                           \
-  my_tx_buffers = tm->tx_buffers[thread_index];                         \
-  _vec_len (my_tx_buffers) +=1;                                         \
-} while (0)
+always_inline int
+tcp_get_free_buffer_index (tcp_main_t * tm, u32 * bidx)
+{
+  u32 *my_tx_buffers, n_free_buffers;
+  u32 thread_index = vlib_get_thread_index ();
+  my_tx_buffers = tm->tx_buffers[thread_index];
+  if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0))
+    {
+      n_free_buffers = VLIB_FRAME_SIZE;
+      vec_validate (my_tx_buffers, n_free_buffers - 1);
+      _vec_len (my_tx_buffers) =
+       vlib_buffer_alloc_from_free_list (vlib_get_main (), my_tx_buffers,
+                                         n_free_buffers,
+                                         VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+      /* buffer shortage, report failure */
+      if (vec_len (my_tx_buffers) == 0)
+       {
+         clib_warning ("out of buffers");
+         return -1;
+       }
+      tm->tx_buffers[thread_index] = my_tx_buffers;
+    }
+  *bidx = my_tx_buffers[_vec_len (my_tx_buffers) - 1];
+  _vec_len (my_tx_buffers) -= 1;
+  return 0;
+}
+
+always_inline void
+tcp_return_buffer (tcp_main_t * tm)
+{
+  u32 *my_tx_buffers;
+  u32 thread_index = vlib_get_thread_index ();
+  my_tx_buffers = tm->tx_buffers[thread_index];
+  _vec_len (my_tx_buffers) += 1;
+}
 
 always_inline void
 tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b)
@@ -706,7 +713,9 @@ tcp_send_reset (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
   ip4_header_t *ih4, *pkt_ih4;
   ip6_header_t *ih6, *pkt_ih6;
 
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
+
   b = vlib_get_buffer (vm, bi);
 
   /* Leave enough space for headers */
@@ -811,7 +820,9 @@ tcp_send_syn (tcp_connection_t * tc)
   u16 initial_wnd;
   tcp_options_t snd_opts;
 
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
+
   b = vlib_get_buffer (vm, bi);
 
   /* Leave enough space for headers */
@@ -854,8 +865,11 @@ tcp_send_syn (tcp_connection_t * tc)
 }
 
 always_inline void
-tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4)
+tcp_enqueue_to_output_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+                        u8 is_ip4, u8 flush)
 {
+  tcp_main_t *tm = vnet_get_tcp_main ();
+  u32 thread_index = vlib_get_thread_index ();
   u32 *to_next, next_index;
   vlib_frame_t *f;
 
@@ -872,12 +886,62 @@ tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4)
       b->pre_data[1] = next_index;
     }
 
-  /* Enqueue the packet */
-  f = vlib_get_frame_to_node (vm, next_index);
+  /* Get frame to v4/6 output node */
+  f = tm->tx_frames[!is_ip4][thread_index];
+  if (!f)
+    {
+      f = vlib_get_frame_to_node (vm, next_index);
+      ASSERT (f);
+      tm->tx_frames[!is_ip4][thread_index] = f;
+    }
   to_next = vlib_frame_vector_args (f);
-  to_next[0] = bi;
-  f->n_vectors = 1;
-  vlib_put_frame_to_node (vm, next_index, f);
+  to_next[f->n_vectors] = bi;
+  f->n_vectors += 1;
+  if (flush || f->n_vectors == VLIB_FRAME_SIZE)
+    {
+      vlib_put_frame_to_node (vm, next_index, f);
+      tm->tx_frames[!is_ip4][thread_index] = 0;
+    }
+}
+
+always_inline void
+tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4)
+{
+  tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 0);
+}
+
+always_inline void
+tcp_enqueue_to_output_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
+                          u8 is_ip4)
+{
+  tcp_enqueue_to_output_i (vm, b, bi, is_ip4, 1);
+}
+
+/**
+ * Flush tx frame populated by retransmits and timer pops
+ */
+void
+tcp_flush_frame_to_output (vlib_main_t * vm, u8 thread_index, u8 is_ip4)
+{
+  if (tcp_main.tx_frames[!is_ip4][thread_index])
+    {
+      u32 next_index;
+      next_index = is_ip4 ? tcp4_output_node.index : tcp6_output_node.index;
+      vlib_put_frame_to_node (vm, next_index,
+                             tcp_main.tx_frames[!is_ip4][thread_index]);
+      tcp_main.tx_frames[!is_ip4][thread_index] = 0;
+    }
+}
+
+/**
+ * Flush both v4 and v6 tx frames for thread index
+ */
+void
+tcp_flush_frames_to_output (u8 thread_index)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  tcp_flush_frame_to_output (vm, thread_index, 1);
+  tcp_flush_frame_to_output (vm, thread_index, 0);
 }
 
 /**
@@ -891,14 +955,15 @@ tcp_send_fin (tcp_connection_t * tc)
   tcp_main_t *tm = vnet_get_tcp_main ();
   vlib_main_t *vm = vlib_get_main ();
 
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
   b = vlib_get_buffer (vm, bi);
 
   /* Leave enough space for headers */
   vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
 
   tcp_make_fin (tc, b);
-  tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
+  tcp_enqueue_to_output_now (vm, b, bi, tc->c_is_ip4);
   tc->flags |= TCP_CONN_FINSNT;
   tcp_retransmit_timer_force_update (tc);
   TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
@@ -981,7 +1046,8 @@ tcp_send_ack (tcp_connection_t * tc)
   u32 bi;
 
   /* Get buffer */
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
   b = vlib_get_buffer (vm, bi);
 
   /* Fill in the ACK */
@@ -1108,7 +1174,9 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
   /* Go back to first un-acked byte */
   tc->snd_nxt = tc->snd_una;
 
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
+
   b = vlib_get_buffer (vm, bi);
 
   if (tc->state >= TCP_STATE_ESTABLISHED)
@@ -1116,6 +1184,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
       /* Lost FIN, retransmit and return */
       if (tc->flags & TCP_CONN_FINSNT)
        {
+         tcp_return_buffer (tm);
         tcp_send_fin (tc);
         return;
       }
@@ -1143,6 +1212,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
         tcp_retransmit_timer_set (tc);
         ASSERT (0 || (tc->rto_boff > 1
                       && tc->snd_una == tc->snd_congestion));
+         tcp_return_buffer (tm);
         return;
       }
 
@@ -1164,6 +1234,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
         clib_warning ("could not remove half-open connection");
         ASSERT (0);
       }
+      tcp_return_buffer (tm);
      return;
    }
 
@@ -1185,6 +1256,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
    {
      ASSERT (tc->state == TCP_STATE_CLOSED);
      clib_warning ("connection closed ...");
+      tcp_return_buffer (tm);
      return;
    }
 
@@ -1254,7 +1326,9 @@ tcp_timer_persist_handler (u32 index)
   tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
 
   /* Try to force the first unsent segment */
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
+
   b = vlib_get_buffer (vm, bi);
 
   tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
@@ -1300,7 +1374,9 @@ tcp_retransmit_first_unacked (tcp_connection_t * tc)
   tc->snd_nxt = tc->snd_una;
 
   /* Get buffer */
-  tcp_get_free_buffer_index (tm, &bi);
+  if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+    return;
+
   b = vlib_get_buffer (vm, bi);
 
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 2);
@@ -1344,9 +1420,10 @@ tcp_fast_retransmit_sack (tcp_connection_t * tc)
   hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
   while (hole && snd_space > 0)
     {
-      tcp_get_free_buffer_index (tm, &bi);
-      b = vlib_get_buffer (vm, bi);
+      if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+       return;
 
+      b = vlib_get_buffer (vm, bi);
      hole = scoreboard_next_rxt_hole (sb, hole,
                                      tcp_fastrecovery_sent_1_smss (tc),
                                      &can_rescue, &snd_limited);
@@ -1414,9 +1491,9 @@ tcp_fast_retransmit_no_sack (tcp_connection_t * tc)
 
   while (snd_space > 0)
     {
-      tcp_get_free_buffer_index (tm, &bi);
+      if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
+       return;
      b = vlib_get_buffer (vm, bi);
-
      offset += n_written;
      n_written = tcp_prepare_retransmit_segment (tc, b, offset, snd_space);
 
@@ -1506,32 +1583,21 @@ tcp46_output_inline (vlib_main_t * vm,
 
          if (is_ip4)
            {
-             ip4_header_t *ih0;
-             ih0 = vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4,
-                                         &tc0->c_rmt_ip4, IP_PROTOCOL_TCP,
-                                         1);
-             b0->flags |=
-               VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
-               VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
-             vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data;
+             vlib_buffer_push_ip4 (vm, b0, &tc0->c_lcl_ip4, &tc0->c_rmt_ip4,
+                                   IP_PROTOCOL_TCP, 1);
+             b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
              th0->checksum = 0;
            }
          else
            {
              ip6_header_t *ih0;
-             int bogus = ~0;
-
              ih0 = vlib_buffer_push_ip6 (vm, b0, &tc0->c_lcl_ip6,
                                          &tc0->c_rmt_ip6, IP_PROTOCOL_TCP);
-
-             b0->flags |= VNET_BUFFER_F_IS_IP6 |
-               VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
-               VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+             b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
              vnet_buffer (b0)->l3_hdr_offset = (u8 *) ih0 - b0->data;
              vnet_buffer (b0)->l4_hdr_offset = (u8 *) th0 - b0->data;
              th0->checksum = 0;
-             ASSERT (!bogus);
            }
 
          /* Filter out DUPACKs if there are no OOO segments left */
```