author    | Florin Coras <fcoras@cisco.com> | 2019-03-12 18:58:54 -0700
committer | Damjan Marion <dmarion@me.com>  | 2019-03-22 17:30:29 +0000
commit    | 4759683744d079f868a6bcca8084eba8466620d7
tree      | 6795035edf69463107dfbaa54eae3183cdc62356 /src
parent    | 881d13ede7336e5b16601033e5dd9aeb94c795e3
tcp: improve handling of snd_nxt
- avoid changing snd_nxt when doing fast retransmits
- use snd_una_max only to keep track of the max seq number sent
- simplify future ack testing
Change-Id: I3580ad3aefe30128486c3375d0ac3f3f62c04c5e
Signed-off-by: Florin Coras <fcoras@cisco.com>
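
In practice the patch pins down a simple ordering of the three send-side markers: snd_una <= snd_nxt, with snd_una_max kept only as a high-water mark that is ratcheted forward (the diff uses seq_max for this) whenever new data or a FIN is sent, and never rewound for retransmits. The sketch below is a minimal, self-contained illustration of that invariant; seq_gt/seq_max and the connection struct are simplified stand-ins, not the actual VPP definitions.

```c
/* Illustrative sketch only: simplified stand-ins for VPP's seq_gt/seq_max
 * helpers and connection state, showing the invariant this patch enforces:
 * snd_una <= snd_nxt, and snd_una_max only ever ratchets forward. */
#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* Serial-number arithmetic: a is "after" b modulo 2^32 */
static int seq_gt (u32 a, u32 b) { return (int32_t) (a - b) > 0; }
static u32 seq_max (u32 a, u32 b) { return seq_gt (a, b) ? a : b; }

typedef struct { u32 snd_una, snd_nxt, snd_una_max; } conn_t;

/* After transmitting n new bytes, snd_nxt advances and the high-water
 * mark ratchets; a retransmit would leave both untouched. */
static void sent_new_data (conn_t * c, u32 n)
{
  c->snd_nxt += n;
  c->snd_una_max = seq_max (c->snd_una_max, c->snd_nxt);
}

int main (void)
{
  conn_t c = { .snd_una = 1000, .snd_nxt = 1000, .snd_una_max = 1000 };
  sent_new_data (&c, 1400);	/* one full segment of new data */
  assert (c.snd_nxt == 2400 && c.snd_una_max == 2400);
  /* a fast retransmit of [1000, 2400) would not modify snd_nxt at all */
  return 0;
}
```

Because retransmit paths now derive the segment sequence number from snd_una plus an offset, they no longer need the old save/restore dance on snd_nxt that the diff below removes.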
Diffstat (limited to 'src')
-rw-r--r-- | src/plugins/unittest/tcp_test.c |   4
-rw-r--r-- | src/vnet/tcp/tcp.c              |   4
-rw-r--r-- | src/vnet/tcp/tcp.h              |   4
-rw-r--r-- | src/vnet/tcp/tcp_input.c        | 112
-rw-r--r-- | src/vnet/tcp/tcp_output.c       |  95
5 files changed, 93 insertions, 126 deletions
```diff
diff --git a/src/plugins/unittest/tcp_test.c b/src/plugins/unittest/tcp_test.c
index c144de4398d..fb01adb6c1f 100644
--- a/src/plugins/unittest/tcp_test.c
+++ b/src/plugins/unittest/tcp_test.c
@@ -239,7 +239,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input)
    */
   vec_reset_length (tc->rcv_opts.sacks);
   tc->snd_una += sb->snd_una_adv;
-  tc->snd_una_max = 1900;
+  tc->snd_nxt = tc->snd_una_max = 1900;
   for (i = 0; i < 5; i++)
     {
       block.start = i * 100 + 1200;
@@ -416,7 +416,7 @@ tcp_test_sack_rx (vlib_main_t * vm, unformat_input_t * input)
    * snd_una = 1000 and snd_una_max = 1600
    */
   tc->snd_una = 1000;
-  tc->snd_una_max = 1600;
+  tc->snd_nxt = tc->snd_una_max = 1600;
   vec_reset_length (tc->rcv_opts.sacks);
   block.start = 1200;
   block.end = 1500;
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index 215bcbb159e..39d683c78e7 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -1164,7 +1164,7 @@ tcp_session_flush_data (transport_connection_t * tconn)
   if (tc->flags & TCP_CONN_PSH_PENDING)
     return;
   tc->flags |= TCP_CONN_PSH_PENDING;
-  tc->psh_seq = tc->snd_una_max + transport_max_tx_dequeue (tconn) - 1;
+  tc->psh_seq = tc->snd_una + transport_max_tx_dequeue (tconn) - 1;
 }
 
 /* *INDENT-OFF* */
@@ -1296,7 +1296,7 @@ tcp_timer_waitclose_handler (u32 conn_index)
        * and switch to LAST_ACK. */
      tcp_cong_recovery_off (tc);
      /* Make sure we don't try to send unsent data */
-     tc->snd_una_max = tc->snd_nxt = tc->snd_una;
+     tc->snd_nxt = tc->snd_una;
      tcp_send_fin (tc);
      tcp_connection_set_state (tc, TCP_STATE_LAST_ACK);
diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index 07ceb5a67a6..9e2c6d8c4f5 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -680,7 +680,7 @@ tcp_flight_size (const tcp_connection_t * tc)
 {
   int flight_size;
 
-  flight_size = (int) (tc->snd_una_max - tc->snd_una) - tcp_bytes_out (tc)
+  flight_size = (int) (tc->snd_nxt - tc->snd_una) - tcp_bytes_out (tc)
     + tc->snd_rxt_bytes;
 
   if (flight_size < 0)
@@ -924,7 +924,7 @@ tcp_persist_timer_reset (tcp_connection_t * tc)
 always_inline void
 tcp_retransmit_timer_update (tcp_connection_t * tc)
 {
-  if (tc->snd_una == tc->snd_una_max)
+  if (tc->snd_una == tc->snd_nxt)
     {
       tcp_retransmit_timer_reset (tc);
       if (tc->snd_wnd < tc->snd_mss)
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 5235c20a565..c27631f3965 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -409,7 +409,7 @@ tcp_rcv_ack_is_acceptable (tcp_connection_t * tc0, vlib_buffer_t * tb0)
 {
   /* SND.UNA =< SEG.ACK =< SND.NXT */
   return (seq_leq (tc0->snd_una, vnet_buffer (tb0)->tcp.ack_number)
-          && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_nxt));
+          && seq_leq (vnet_buffer (tb0)->tcp.ack_number, tc0->snd_una_max));
 }
 
 /**
@@ -604,7 +604,7 @@ tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd,
                    u32 prev_snd_una)
 {
   return ((vnet_buffer (b)->tcp.ack_number == prev_snd_una)
-          && seq_gt (tc->snd_una_max, tc->snd_una)
+          && seq_gt (tc->snd_nxt, tc->snd_una)
           && (vnet_buffer (b)->tcp.seq_end == vnet_buffer (b)->tcp.seq_number)
           && (prev_snd_wnd == tc->snd_wnd));
 }
@@ -913,7 +913,7 @@ tcp_scoreboard_is_sane_post_recovery (tcp_connection_t * tc)
   sack_scoreboard_hole_t *hole;
   hole = scoreboard_first_hole (&tc->sack_sb);
   return (!hole || (seq_geq (hole->start, tc->snd_una)
-                    && seq_lt (hole->end, tc->snd_una_max)));
+                    && seq_lt (hole->end, tc->snd_nxt)));
 }
 
 #ifndef CLIB_MARCH_VARIANT
@@ -943,8 +943,8 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
       if (seq_lt (blk->start, blk->end)
          && seq_gt (blk->start, tc->snd_una)
          && seq_gt (blk->start, ack)
-         && seq_lt (blk->start, tc->snd_una_max)
-         && seq_leq (blk->end, tc->snd_una_max))
+         && seq_lt (blk->start, tc->snd_nxt)
+         && seq_leq (blk->end, tc->snd_nxt))
        {
          blk++;
          continue;
@@ -979,7 +979,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
     {
       /* If no holes, insert the first that covers all outstanding bytes */
       last_hole = scoreboard_insert_hole (sb, TCP_INVALID_SACK_HOLE_INDEX,
-                                          tc->snd_una, tc->snd_una_max);
+                                          tc->snd_una, tc->snd_nxt);
       sb->tail = scoreboard_hole_index (sb, last_hole);
       tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
       sb->high_sacked = tmp.end;
@@ -990,17 +990,17 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
        * last hole end */
       tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1];
       last_hole = scoreboard_last_hole (sb);
-      if (seq_gt (tc->snd_una_max, last_hole->end))
+      if (seq_gt (tc->snd_nxt, last_hole->end))
        {
          if (seq_geq (last_hole->start, sb->high_sacked))
            {
-             last_hole->end = tc->snd_una_max;
+             last_hole->end = tc->snd_nxt;
            }
          /* New hole after high sacked block */
-         else if (seq_lt (sb->high_sacked, tc->snd_una_max))
+         else if (seq_lt (sb->high_sacked, tc->snd_nxt))
            {
              scoreboard_insert_hole (sb, sb->tail, sb->high_sacked,
-                                     tc->snd_una_max);
+                                     tc->snd_nxt);
            }
        }
       /* Keep track of max byte sacked for when the last hole
@@ -1078,8 +1078,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
   if (pool_elts (sb->holes) == 1)
     {
       hole = scoreboard_first_hole (sb);
-      if (hole->start == ack + sb->snd_una_adv
-         && hole->end == tc->snd_una_max)
+      if (hole->start == ack + sb->snd_una_adv && hole->end == tc->snd_nxt)
        scoreboard_remove_hole (sb, hole);
     }
 
@@ -1088,8 +1087,8 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack)
     - (old_sacked_bytes - sb->last_bytes_delivered);
   ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes || tcp_in_recovery (tc));
   ASSERT (sb->sacked_bytes == 0 || tcp_in_recovery (tc)
-         || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack));
-  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max
+         || sb->sacked_bytes < tc->snd_nxt - seq_max (tc->snd_una, ack));
+  ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_nxt
         - seq_max (tc->snd_una, ack) || tcp_in_recovery (tc));
   ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc)
         || sb->holes[sb->head].start == ack + sb->snd_una_adv);
@@ -1146,7 +1145,7 @@ void
 tcp_cc_init_congestion (tcp_connection_t * tc)
 {
   tcp_fastrecovery_on (tc);
-  tc->snd_congestion = tc->snd_una_max;
+  tc->snd_congestion = tc->snd_nxt;
   tc->cwnd_acc_bytes = 0;
   tc->snd_rxt_bytes = 0;
   tc->prev_ssthresh = tc->ssthresh;
@@ -1162,7 +1161,6 @@ tcp_cc_recovery_exit (tcp_connection_t * tc)
   tc->rto_boff = 0;
   tcp_update_rto (tc);
   tc->snd_rxt_ts = 0;
-  tc->snd_nxt = tc->snd_una_max;
   tc->rtt_ts = 0;
   tcp_recovery_off (tc);
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 3);
@@ -1175,7 +1173,6 @@ tcp_cc_fastrecovery_exit (tcp_connection_t * tc)
   tc->cc_algo->recovered (tc);
   tc->snd_rxt_bytes = 0;
   tc->rcv_dupacks = 0;
-  tc->snd_nxt = tc->snd_una_max;
   tc->snd_rxt_bytes = 0;
   tc->rtt_ts = 0;
 
@@ -1191,12 +1188,16 @@ tcp_cc_congestion_undo (tcp_connection_t * tc)
 {
   tc->cwnd = tc->prev_cwnd;
   tc->ssthresh = tc->prev_ssthresh;
-  tc->snd_nxt = tc->snd_una_max;
   tc->rcv_dupacks = 0;
   if (tcp_in_recovery (tc))
-    tcp_cc_recovery_exit (tc);
+    {
+      tcp_cc_recovery_exit (tc);
+      tc->snd_nxt = seq_max (tc->snd_nxt, tc->snd_congestion);
+    }
   else if (tcp_in_fastrecovery (tc))
-    tcp_cc_fastrecovery_exit (tc);
+    {
+      tcp_cc_fastrecovery_exit (tc);
+    }
   ASSERT (tc->rto_boff == 0);
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 5);
 }
@@ -1374,8 +1375,7 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack)
   else if (is_dack && !tcp_in_recovery (tc))
     {
       TCP_EVT_DBG (TCP_EVT_DUPACK_RCVD, tc, 1);
-      ASSERT (tc->snd_una != tc->snd_una_max
-             || tc->sack_sb.last_sacked_bytes);
+      ASSERT (tc->snd_una != tc->snd_nxt || tc->sack_sb.last_sacked_bytes);
 
       tc->rcv_dupacks++;
 
@@ -1482,8 +1482,6 @@ partial_ack:
          return;
        }
 
-      tc->snd_nxt = tc->snd_una_max;
-
       /* Treat as congestion avoidance ack */
       tcp_cc_rcv_ack (tc);
       return;
@@ -1565,38 +1563,18 @@ tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
   /* If the ACK acks something not yet sent (SEG.ACK > SND.NXT) */
   if (PREDICT_FALSE (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_nxt)))
     {
-      /* When we entered cong recovery, we reset snd_nxt to snd_una. Seems
-       * peer still has the data so accept the ack */
-      if (tcp_in_cong_recovery (tc)
-         && seq_leq (vnet_buffer (b)->tcp.ack_number,
-                     tc->snd_una + tc->snd_wnd))
+      /* We've probably entered recovery and the peer still has some
+       * of the data we've sent. Update snd_nxt and accept the ack */
+      if (seq_leq (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
        {
          tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
-         if (seq_gt (tc->snd_nxt, tc->snd_una_max))
-           tc->snd_una_max = tc->snd_nxt;
          goto process_ack;
        }
 
-      /* If we have outstanding data and this is within the window, accept it,
-       * probably retransmit has timed out. Otherwise ACK segment and then
-       * drop it */
-      if (seq_gt (vnet_buffer (b)->tcp.ack_number, tc->snd_una_max))
-       {
-         tcp_program_ack (wrk, tc);
-         *error = TCP_ERROR_ACK_FUTURE;
-         TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
-                      vnet_buffer (b)->tcp.ack_number);
-         return -1;
-       }
-
-      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 2,
+      *error = TCP_ERROR_ACK_FUTURE;
+      TCP_EVT_DBG (TCP_EVT_ACK_RCV_ERR, tc, 0,
                   vnet_buffer (b)->tcp.ack_number);
-
-      tc->snd_nxt = vnet_buffer (b)->tcp.ack_number;
-      if (seq_gt (tc->snd_nxt, tc->snd_una_max))
-       tc->snd_una_max = tc->snd_nxt;
-
-      goto process_ack;
+      return -1;
     }
@@ -1611,10 +1589,11 @@ tcp_rcv_ack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc, vlib_buffer_t * b,
       return 0;
     }
 
+process_ack:
+
   /*
    * Looks okay, process feedback
    */
-process_ack:
   if (tcp_opts_sack_permitted (&tc->rcv_opts))
     tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number);
 
@@ -1767,13 +1746,7 @@ tcp_update_sack_list (tcp_connection_t * tc, u32 start, u32 end)
 
       /* Save to new SACK list if we have space. */
       if (vec_len (new_list) < TCP_MAX_SACK_BLOCKS)
-       {
-         vec_add1 (new_list, tc->snd_sacks[i]);
-       }
-      else
-       {
-         clib_warning ("sack discarded");
-       }
+       vec_add1 (new_list, tc->snd_sacks[i]);
     }
 
   ASSERT (vec_len (new_list) <= TCP_MAX_SACK_BLOCKS);
@@ -2799,7 +2772,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                  tcp_send_fin (tc0);
                }
              /* If FIN is ACKed */
-             else if (tc0->snd_una == tc0->snd_una_max)
+             else if (tc0->snd_una == tc0->snd_nxt)
                {
                  tcp_connection_set_state (tc0, TCP_STATE_FIN_WAIT_2);
 
@@ -2845,8 +2818,21 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          /* In addition to the processing for the ESTABLISHED state, if
           * the ACK acknowledges our FIN then enter the TIME-WAIT state,
           * otherwise ignore the segment. */
-         if (tcp_rcv_ack (wrk, tc0, b0, tcp0, &error0))
-           goto drop;
+         if (!tcp_rcv_ack_is_acceptable (tc0, b0))
+           {
+             error0 = TCP_ERROR_ACK_INVALID;
+             goto drop;
+           }
+
+         error0 = TCP_ERROR_ACK_OK;
+         tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
+         /* Ack moved snd_una beyond snd_nxt so reprogram fin */
+         if (seq_gt (tc0->snd_una, tc0->snd_nxt))
+           {
+             tc0->snd_nxt = tc0->snd_una;
+             tc0->flags &= ~TCP_CONN_FINSNT;
+             goto drop;
+           }
 
          tcp_connection_timers_reset (tc0);
          tcp_connection_set_state (tc0, TCP_STATE_TIME_WAIT);
@@ -2867,7 +2853,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
          error0 = TCP_ERROR_ACK_OK;
          tc0->snd_una = vnet_buffer (b0)->tcp.ack_number;
          /* Apparently our ACK for the peer's FIN was lost */
-         if (is_fin0 && tc0->snd_una != tc0->snd_una_max)
+         if (is_fin0 && tc0->snd_una != tc0->snd_nxt)
            {
              tcp_send_fin (tc0);
              goto drop;
@@ -2971,7 +2957,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
              tcp_fastrecovery_off (tc0);
              tcp_recovery_off (tc0);
              tcp_connection_timers_reset (tc0);
-             tc0->snd_nxt = tc0->snd_una_max = tc0->snd_una;
+             tc0->snd_nxt = tc0->snd_una;
            }
          tcp_send_fin (tc0);
        }
diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c
index ce7b50ac3e6..4b38649c171 100644
--- a/src/vnet/tcp/tcp_output.c
+++ b/src/vnet/tcp/tcp_output.c
@@ -183,7 +183,7 @@ tcp_update_rcv_wnd (tcp_connection_t * tc)
 /**
  * Compute and return window to advertise, scaled as per RFC1323
  */
-static u32
+static inline u32
 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state)
 {
   if (state < TCP_STATE_ESTABLISHED)
@@ -1052,14 +1052,14 @@ tcp_send_fin (tcp_connection_t * tc)
 
   fin_snt = tc->flags & TCP_CONN_FINSNT;
   if (fin_snt)
-    tc->snd_nxt = tc->snd_una;
+    tc->snd_nxt -= 1;
 
   if (PREDICT_FALSE (!vlib_buffer_alloc (vm, &bi, 1)))
     {
       /* Out of buffers so program fin retransmit ASAP */
       tcp_timer_update (tc, TCP_TIMER_RETRANSMIT, 1);
       if (fin_snt)
-       tc->snd_nxt = tc->snd_una_max;
+       tc->snd_nxt += 1;
       else
       /* Make sure retransmit retries a fin not data */
       tc->flags |= TCP_CONN_FINSNT;
@@ -1072,18 +1072,13 @@ tcp_send_fin (tcp_connection_t * tc)
   tcp_make_fin (tc, b);
   tcp_enqueue_to_output_now (wrk, b, bi, tc->c_is_ip4);
   TCP_EVT_DBG (TCP_EVT_FIN_SENT, tc);
-
+  /* Account for the FIN */
+  tc->snd_nxt += 1;
   if (!fin_snt)
     {
       tc->flags |= TCP_CONN_FINSNT;
       tc->flags &= ~TCP_CONN_FINPNDG;
-      /* Account for the FIN */
-      tc->snd_una_max += 1;
-      tc->snd_nxt = tc->snd_una_max;
-    }
-  else
-    {
-      tc->snd_nxt = tc->snd_una_max;
+      tc->snd_una_max = seq_max (tc->snd_una_max, tc->snd_nxt);
     }
 }
 
@@ -1092,8 +1087,8 @@ tcp_send_fin (tcp_connection_t * tc)
  * for segments with data, not for 'control' packets.
  */
 always_inline void
-tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
-               tcp_state_t next_state, u8 compute_opts, u8 maybe_burst)
+tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b, u32 snd_nxt,
+               u8 compute_opts, u8 maybe_burst, u8 update_snd_nxt)
 {
   u8 tcp_hdr_opts_len, flags = TCP_FLAG_ACK;
   u32 advertise_wnd, data_len;
@@ -1115,15 +1110,15 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
   if (maybe_burst)
     advertise_wnd = tc->rcv_wnd >> tc->rcv_wscale;
   else
-    advertise_wnd = tcp_window_to_advertise (tc, next_state);
+    advertise_wnd = tcp_window_to_advertise (tc, TCP_STATE_ESTABLISHED);
 
   if (PREDICT_FALSE (tc->flags & TCP_CONN_PSH_PENDING))
     {
-      if (seq_geq (tc->psh_seq, tc->snd_nxt)
-         && seq_lt (tc->psh_seq, tc->snd_nxt + data_len))
+      if (seq_geq (tc->psh_seq, snd_nxt)
+         && seq_lt (tc->psh_seq, snd_nxt + data_len))
        flags |= TCP_FLAG_PSH;
     }
 
-  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->snd_nxt,
+  th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, snd_nxt,
                             tc->rcv_nxt, tcp_hdr_opts_len, flags,
                             advertise_wnd);
@@ -1143,7 +1138,8 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
    * Update connection variables
    */
 
-  tc->snd_nxt += data_len;
+  if (update_snd_nxt)
+    tc->snd_nxt += data_len;
   tc->rcv_las = tc->rcv_nxt;
 
   TCP_EVT_DBG (TCP_EVT_PKTIZE, tc);
@@ -1153,9 +1149,9 @@ u32
 tcp_session_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
 {
   tcp_connection_t *tc = (tcp_connection_t *) tconn;
-  tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, /* compute opts */ 0,
-                 /* burst */ 1);
-  tc->snd_una_max = tc->snd_nxt;
+  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0, /* burst */ 1,
+                 /* update_snd_nxt */ 1);
+  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
   ASSERT (seq_leq (tc->snd_una_max, tc->snd_una + tc->snd_wnd));
   tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
   /* If not tracking an ACK, start tracking */
@@ -1322,9 +1318,8 @@ tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                                           max_deq_bytes);
       ASSERT (n_bytes == max_deq_bytes);
       b[0]->current_length = n_bytes;
-      tcp_push_hdr_i (tc, *b, tc->state, /* compute opts */ 0, /* burst */ 0);
-      if (seq_gt (tc->snd_nxt, tc->snd_una_max))
-       tc->snd_una_max = tc->snd_nxt;
+      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
+                     /* burst */ 0, /* update_snd_nxt */ 0);
     }
   /* Split mss into multiple buffers */
   else
@@ -1381,9 +1376,8 @@ tcp_prepare_segment (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
          b[0]->total_length_not_including_first_buffer += n_peeked;
        }
 
-      tcp_push_hdr_i (tc, *b, tc->state, /* compute opts */ 0, /* burst */ 0);
-      if (seq_gt (tc->snd_nxt, tc->snd_una_max))
-       tc->snd_una_max = tc->snd_nxt;
+      tcp_push_hdr_i (tc, *b, tc->snd_una + offset, /* compute opts */ 0,
+                     /* burst */ 0, /* update_snd_nxt */ 0);
 
       if (PREDICT_FALSE (n_bufs))
        {
@@ -1475,7 +1469,7 @@ tcp_rxt_timeout_cc (tcp_connection_t * tc)
   /* Start again from the beginning */
   tc->cc_algo->congestion (tc);
   tc->cwnd = tcp_loss_wnd (tc);
-  tc->snd_congestion = tc->snd_una_max;
+  tc->snd_congestion = tc->snd_nxt;
   tc->rtt_ts = 0;
   tc->cwnd_acc_bytes = 0;
   tcp_connection_tx_pacer_reset (tc, tc->cwnd, 2 * tc->snd_mss);
@@ -1527,7 +1521,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
 
   /* Shouldn't be here. This condition is tricky because it has to take
    * into account boff > 0 due to persist timeout. */
-  if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_una_max)
+  if ((tc->rto_boff == 0 && tc->snd_una == tc->snd_nxt)
      || (tc->rto_boff > 0 && seq_geq (tc->snd_una, tc->snd_congestion)
         && !tcp_flight_size (tc)))
    {
@@ -1555,10 +1549,9 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
       scoreboard_clear (&tc->sack_sb);
 
      /* If we've sent beyond snd_congestion, update it */
-     if (seq_gt (tc->snd_una_max, tc->snd_congestion))
-       tc->snd_congestion = tc->snd_una_max;
+     tc->snd_congestion = seq_max (tc->snd_nxt, tc->snd_congestion);
 
-     tc->snd_una_max = tc->snd_nxt = tc->snd_una;
+     tc->snd_nxt = tc->snd_una;
      tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX);
 
      /* Send one segment. Note that n_bytes may be zero due to buffer
@@ -1695,7 +1688,7 @@ tcp_timer_persist_handler (u32 index)
     return;
 
   available_bytes = transport_max_tx_dequeue (&tc->connection);
-  offset = tc->snd_una_max - tc->snd_una;
+  offset = tc->snd_nxt - tc->snd_una;
 
   /* Reprogram persist if no new bytes available to send. We may have data
    * next time */
@@ -1737,8 +1730,9 @@ tcp_timer_persist_handler (u32 index)
                      || tc->snd_nxt == tc->snd_una_max
                      || tc->rto_boff > 1));
 
-  tcp_push_hdr_i (tc, b, tc->state, /* compute opts */ 0, /* burst */ 0);
-  tc->snd_una_max = tc->snd_nxt;
+  tcp_push_hdr_i (tc, b, tc->snd_nxt, /* compute opts */ 0,
+                 /* burst */ 0, /* update_snd_nxt */ 1);
+  tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
   tcp_validate_txf_size (tc, tc->snd_una_max - tc->snd_una);
   tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
 
@@ -1752,12 +1746,9 @@ tcp_timer_persist_handler (u32 index)
 int
 tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
 {
-  u32 bi, old_snd_nxt, n_bytes;
   vlib_main_t *vm = wrk->vm;
   vlib_buffer_t *b;
-
-  old_snd_nxt = tc->snd_nxt;
-  tc->snd_nxt = tc->snd_una;
+  u32 bi, n_bytes;
 
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1);
@@ -1767,7 +1758,6 @@ tcp_retransmit_first_unacked (tcp_worker_ctx_t * wrk, tcp_connection_t * tc)
 
   bi = vlib_get_buffer_index (vm, b);
   tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
-  tc->snd_nxt = old_snd_nxt;
 
   return 0;
 }
@@ -1780,8 +1770,7 @@ tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
   vlib_main_t *vm = wrk->vm;
   vlib_buffer_t *b = 0;
 
-  tc->snd_nxt = tc->snd_una_max;
-  offset = tc->snd_una_max - tc->snd_una;
+  offset = tc->snd_nxt - tc->snd_una;
   while (n_segs < burst_size)
     {
       n_written = tcp_prepare_segment (wrk, tc, offset, tc->snd_mss, &b);
@@ -1792,6 +1781,9 @@ tcp_fast_retransmit_unsent (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
       tcp_enqueue_to_output (wrk, b, bi, tc->c_is_ip4);
       offset += n_written;
       n_segs += 1;
+
+      tc->snd_nxt += n_written;
+      tc->snd_una_max = seq_max (tc->snd_nxt, tc->snd_una_max);
     }
 
 done:
@@ -1814,9 +1806,8 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
   vlib_main_t *vm = wrk->vm;
   vlib_buffer_t *b = 0;
   sack_scoreboard_t *sb;
-  u32 bi, old_snd_nxt;
+  u32 bi, max_deq;
   int snd_space;
-  u32 max_deq;
   u8 snd_limited = 0, can_rescue = 0;
 
   ASSERT (tcp_in_fastrecovery (tc));
@@ -1829,12 +1820,11 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
     }
 
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
-  old_snd_nxt = tc->snd_nxt;
   sb = &tc->sack_sb;
   hole = scoreboard_get_hole (sb, sb->cur_rxt_hole);
 
   max_deq = transport_max_tx_dequeue (&tc->connection);
-  max_deq -= tc->snd_una_max - tc->snd_una;
+  max_deq -= tc->snd_nxt - tc->snd_una;
 
   while (snd_space > 0 && n_segs < burst_size)
     {
@@ -1867,7 +1857,6 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
          max_bytes = clib_min (max_bytes, snd_space);
          offset = tc->snd_congestion - tc->snd_una - max_bytes;
          sb->rescue_rxt = tc->snd_congestion;
-         tc->snd_nxt = tc->snd_una + offset;
          n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
                                                      max_bytes, &b);
          if (!n_written)
@@ -1885,7 +1874,6 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
        break;
 
       offset = sb->high_rxt - tc->snd_una;
-      tc->snd_nxt = sb->high_rxt;
      n_written = tcp_prepare_retransmit_segment (wrk, tc, offset, max_bytes,
                                                  &b);
      ASSERT (n_written <= snd_space);
@@ -1906,8 +1894,6 @@ tcp_fast_retransmit_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
     tcp_program_fastretransmit (wrk, tc);
 
 done:
-  /* If window allows, send 1 SMSS of new data */
-  tc->snd_nxt = old_snd_nxt;
   return n_segs;
 }
 
@@ -1918,14 +1904,13 @@ int
 tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
                             u32 burst_size)
 {
-  u32 n_written = 0, offset = 0, bi, old_snd_nxt, max_deq, n_segs_now;
+  u32 n_written = 0, offset = 0, bi, max_deq, n_segs_now;
   vlib_main_t *vm = wrk->vm;
   int snd_space, n_segs = 0;
   vlib_buffer_t *b;
 
   ASSERT (tcp_in_fastrecovery (tc));
   TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 0);
-  old_snd_nxt = tc->snd_nxt;
 
   if (!tcp_fastrecovery_first (tc))
     goto send_unsent;
@@ -1933,7 +1918,6 @@ tcp_fast_retransmit_no_sack (tcp_worker_ctx_t * wrk, tcp_connection_t * tc,
   /* RFC 6582: [If a partial ack], retransmit the first unacknowledged
    * segment. */
   snd_space = tc->sack_sb.last_bytes_delivered;
-  tc->snd_nxt = tc->snd_una;
   while (snd_space > 0 && n_segs < burst_size)
     {
       n_written = tcp_prepare_retransmit_segment (wrk, tc, offset,
@@ -1961,7 +1945,7 @@ send_unsent:
     goto done;
 
   max_deq = transport_max_tx_dequeue (&tc->connection);
-  max_deq -= tc->snd_una_max - tc->snd_una;
+  max_deq -= tc->snd_nxt - tc->snd_una;
   if (max_deq)
     {
       snd_space = clib_min (max_deq, snd_space);
@@ -1972,9 +1956,6 @@ send_unsent:
       n_segs += n_segs_now;
     }
 
-  /* Restore snd_nxt */
-  tc->snd_nxt = old_snd_nxt;
-
 done:
   tcp_fastrecovery_first_off (tc);
   return n_segs;
```
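
The tcp_output.c side of the change is easiest to see in the new tcp_push_hdr_i contract: callers pass the sequence number to put on the wire plus an update_snd_nxt flag, so only the new-data path advances snd_nxt, while retransmit paths build segments at snd_una + offset and leave it alone. Below is a hedged, stand-alone sketch of that calling pattern; the names and signatures are hypothetical simplifications, not the vpp API.

```c
/* Hedged sketch of the calling pattern this patch moves tcp_output.c to:
 * the header writer takes the sequence number explicitly and advances
 * snd_nxt only when asked. Hypothetical simplified names, not vpp code. */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef struct { u32 snd_una, snd_nxt, snd_una_max; } conn_t;

static u32 seq_max (u32 a, u32 b) { return (int32_t) (a - b) > 0 ? a : b; }

/* Stand-in for tcp_push_hdr_i (tc, b, snd_nxt, ..., update_snd_nxt):
 * writes a header carrying 'seq' and advances snd_nxt only for new data. */
static void push_segment (conn_t * c, u32 seq, u32 len, int update_snd_nxt)
{
  /* ... build the TCP header with sequence number 'seq' here ... */
  if (update_snd_nxt)
    c->snd_nxt += len;
}

/* New data: segment starts at snd_nxt, which advances; the high-water
 * mark (snd_una_max) only ratchets forward. */
static void send_new (conn_t * c, u32 len)
{
  push_segment (c, c->snd_nxt, len, 1 /* update_snd_nxt */ );
  c->snd_una_max = seq_max (c->snd_nxt, c->snd_una_max);
}

/* Retransmit: segment starts at snd_una + offset; snd_nxt is untouched,
 * so there is no save/restore of it around the fast-retransmit loops. */
static void retransmit (conn_t * c, u32 offset, u32 len)
{
  push_segment (c, c->snd_una + offset, len, 0 /* update_snd_nxt */ );
}

int main (void)
{
  conn_t c = { .snd_una = 5000, .snd_nxt = 5000, .snd_una_max = 5000 };
  send_new (&c, 1448);		/* snd_nxt = snd_una_max = 6448 */
  retransmit (&c, 0, 1448);	/* resend [5000, 6448); markers unchanged */
  printf ("una %u nxt %u max %u\n", c.snd_una, c.snd_nxt, c.snd_una_max);
  return 0;
}
```

tcp_send_fin follows the same idea in the diff: the FIN consumes one sequence number via snd_nxt += 1 and ratchets snd_una_max, and a retransmitted FIN temporarily steps snd_nxt back by one so the retransmission reuses the same sequence number.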