Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/session/node.c    |   2
-rw-r--r--  src/vnet/tcp/tcp.c         |  17
-rw-r--r--  src/vnet/tcp/tcp.h         |  10
-rw-r--r--  src/vnet/tcp/tcp_input.c   |   6
-rw-r--r--  src/vnet/tcp/tcp_output.c  | 104
5 files changed, 97 insertions, 42 deletions
diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c
index 210754fa723..2d12ee2bac9 100644
--- a/src/vnet/session/node.c
+++ b/src/vnet/session/node.c
@@ -95,8 +95,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node,
tc0 = transport_vft->get_connection (s0->connection_index, thread_index);
/* Make sure we have space to send and there's something to dequeue */
- snd_space0 = transport_vft->send_space (tc0);
snd_mss0 = transport_vft->send_mss (tc0);
+ snd_space0 = transport_vft->send_space (tc0);
/* Can't make any progress */
if (snd_space0 == 0 || snd_mss0 == 0)
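The reordering in session/node.c matters because, with this patch, the transport's send_mss callback (tcp_session_send_mss, below) refreshes the connection's tx options and effective snd_mss, and send_space rounds the available space to a multiple of that snd_mss; querying the MSS first keeps the two consistent. A minimal sketch of the dependency, with stand-in signatures rather than the real transport vft:

#include <stdint.h>

/* Illustrative stand-ins: the real callbacks take a transport_connection_t,
 * not void *, and are reached through transport_vft->send_mss/->send_space. */
uint16_t tcp_session_send_mss (void *tc);   /* refreshes snd_opts and snd_mss */
uint32_t tcp_session_send_space (void *tc); /* rounded to snd_mss */

static uint32_t
tx_burst_segments (void *tc)
{
  uint16_t snd_mss = tcp_session_send_mss (tc);     /* must run first */
  uint32_t snd_space = tcp_session_send_space (tc);

  if (snd_space == 0 || snd_mss == 0)
    return 0;                 /* can't make any progress */
  return snd_space / snd_mss; /* full segments available this burst */
}
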
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index e80e2ec9b7c..224ee0dd160 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -331,7 +331,7 @@ void
tcp_connection_init_vars (tcp_connection_t * tc)
{
tcp_connection_timers_init (tc);
- tcp_set_snd_mss (tc);
+ tcp_init_mss (tc);
scoreboard_init (&tc->sack_sb);
tcp_cc_init (tc);
}
@@ -560,10 +560,23 @@ tcp_half_open_session_get_transport (u32 conn_index)
return &tc->connection;
}
+/**
+ * Compute maximum segment size for session layer.
+ *
+ * Since the result needs to be the actual data length, it first computes
+ * the tcp options to be used in the next burst and subtracts their
+ * length from the connection's snd_mss.
+ */
u16
tcp_session_send_mss (transport_connection_t * trans_conn)
{
tcp_connection_t *tc = (tcp_connection_t *) trans_conn;
+
+ /* Ensure snd_mss does accurately reflect the amount of data we can push
+ * in a segment. This also makes sure that options are updated according to
+ * the current state of the connection. */
+ tcp_update_snd_mss (tc);
+
return tc->snd_mss;
}
@@ -607,7 +620,7 @@ tcp_session_send_space (transport_connection_t * trans_conn)
tc->snd_nxt = tc->snd_una_max;
snd_space = tcp_available_wnd (tc) - tc->rtx_bytes
- (tc->snd_una_max - tc->snd_congestion);
- if (snd_space <= 0)
+ if (snd_space <= 0 || (tc->snd_una_max - tc->snd_una) >= tc->snd_wnd)
return 0;
return tcp_round_snd_space (tc, snd_space);
}
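The new condition in tcp_session_send_space guards against pushing data when the un-acked range (snd_una_max - snd_una) already covers the peer's advertised window. A standalone sketch of the computation in the hunk above, using simplified fields in place of the full tcp_connection_t and assuming tcp_round_snd_space rounds down to an snd_mss multiple:

#include <stdint.h>

/* Simplified stand-in for the fields the computation above relies on */
typedef struct
{
  uint32_t snd_una, snd_una_max, snd_congestion;
  uint32_t snd_wnd, rtx_bytes, available_wnd;
  uint16_t snd_mss;
} conn_sketch_t;

static uint32_t
send_space_sketch (conn_sketch_t *tc)
{
  int32_t snd_space;

  snd_space = (int32_t) (tc->available_wnd - tc->rtx_bytes
                         - (tc->snd_una_max - tc->snd_congestion));

  /* No space, or the send window is already fully occupied in flight */
  if (snd_space <= 0 || (tc->snd_una_max - tc->snd_una) >= tc->snd_wnd
      || tc->snd_mss == 0)
    return 0;

  /* Round down to a multiple of snd_mss */
  return snd_space - snd_space % tc->snd_mss;
}
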
diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index c75479dce5c..8212ada785e 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -211,7 +211,9 @@ typedef struct _tcp_connection
u32 irs; /**< initial remote sequence */
/* Options */
- tcp_options_t opt; /**< TCP connection options parsed */
+ tcp_options_t opt; /**< TCP connection options parsed */
+ tcp_options_t snd_opts; /**< Tx options for connection */
+ u8 snd_opts_len; /**< Tx options len */
u8 rcv_wscale; /**< Window scale to advertise to peer */
u8 snd_wscale; /**< Window scale to use when sending */
u32 tsval_recent; /**< Last timestamp received */
@@ -241,7 +243,8 @@ typedef struct _tcp_connection
u32 rtt_ts; /**< Timestamp for tracked ACK */
u32 rtt_seq; /**< Sequence number for tracked ACK */
- u16 snd_mss; /**< Send MSS */
+ u16 snd_mss; /**< Effective send max seg (data) size */
+ u16 mss; /**< Our max seg size that includes options */
} tcp_connection_t;
struct _tcp_cc_algorithm
@@ -405,7 +408,8 @@ void tcp_make_synack (tcp_connection_t * ts, vlib_buffer_t * b);
void tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4);
void tcp_send_syn (tcp_connection_t * tc);
void tcp_send_fin (tcp_connection_t * tc);
-void tcp_set_snd_mss (tcp_connection_t * tc);
+void tcp_init_mss (tcp_connection_t * tc);
+void tcp_update_snd_mss (tcp_connection_t * tc);
always_inline u32
tcp_end_seq (tcp_header_t * th, u32 len)
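tcp.h now tracks two sizes: mss is what this side can receive and advertises in its options (still tied to dummy_mtu until PMTU discovery lands), while snd_mss is the data payload per segment once the peer's advertised MSS and the current tx options are subtracted. A hedged sketch of the relation, with placeholder values rather than anything actually negotiated:

#include <stdint.h>

/* Relation between the new fields as the patch uses them:
 *   mss          - what we advertise to the peer in SYN/SYN-ACK options
 *   snd_opts_len - length of the tx options computed for the next burst
 *   snd_mss      - data bytes we can actually place in one segment      */
static uint16_t
effective_snd_mss (uint16_t our_mss, uint16_t peer_mss, uint8_t snd_opts_len)
{
  uint16_t base = our_mss < peer_mss ? our_mss : peer_mss;
  return base - snd_opts_len;
}

/* e.g. effective_snd_mss (1460, 1460, 12) == 1448 when a timestamp option
 * (10 bytes plus padding) is in play; the values are illustrative only. */
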
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index e9c52c5e27f..d268251cb38 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -389,10 +389,14 @@ static int
tcp_update_rtt (tcp_connection_t * tc, u32 ack)
{
u32 mrtt = 0;
+ u8 rtx_acked;
+
+ /* Determine if only rtx bytes are acked. TODO fast retransmit */
+ rtx_acked = tc->rto_boff && (tc->bytes_acked <= tc->snd_mss);
/* Karn's rule, part 1. Don't use retransmitted segments to estimate
* RTT because they're ambiguous. */
- if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq) && !tc->rto_boff)
+ if (tc->rtt_ts && seq_geq (ack, tc->rtt_seq) && !rtx_acked)
{
mrtt = tcp_time_now () - tc->rtt_ts;
}
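Karn's rule says ACKs for retransmitted segments must not feed the RTT estimator because they are ambiguous. Before this change any non-zero backoff (rto_boff) blocked sampling; now a sample is only dropped when the ACK plausibly covers nothing but retransmitted data, i.e. at most one snd_mss worth of bytes acked while backed off. A small sketch of the predicate, with simplified fields standing in for tcp_connection_t:

#include <stdint.h>

typedef struct
{
  uint8_t rto_boff;     /* RTO backoff count, non-zero after a retransmit */
  uint32_t bytes_acked; /* bytes acknowledged by the current segment */
  uint16_t snd_mss;     /* effective send MSS */
} rtt_sketch_t;

/* Non-zero when the ACK should be skipped for RTT measurement because
 * it appears to cover only retransmitted bytes (Karn's rule, part 1). */
static int
rtx_only_acked (rtt_sketch_t *tc)
{
  return tc->rto_boff && tc->bytes_acked <= tc->snd_mss;
}
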
diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c
index 7ee930c61af..2a1b140750c 100644
--- a/src/vnet/tcp/tcp_output.c
+++ b/src/vnet/tcp/tcp_output.c
@@ -64,26 +64,6 @@ format_tcp_tx_trace (u8 * s, va_list * args)
return s;
}
-void
-tcp_set_snd_mss (tcp_connection_t * tc)
-{
- u16 snd_mss;
-
- /* TODO find our iface MTU */
- snd_mss = dummy_mtu;
-
- /* TODO cache mss and consider PMTU discovery */
- snd_mss = clib_min (tc->opt.mss, snd_mss);
-
- tc->snd_mss = snd_mss;
-
- if (tc->snd_mss == 0)
- {
- clib_warning ("snd mss is 0");
- tc->snd_mss = dummy_mtu;
- }
-}
-
static u8
tcp_window_compute_scale (u32 available_space)
{
@@ -100,7 +80,7 @@ tcp_window_compute_scale (u32 available_space)
always_inline u32
tcp_initial_wnd_unscaled (tcp_connection_t * tc)
{
- return TCP_IW_N_SEGMENTS * dummy_mtu;
+ return TCP_IW_N_SEGMENTS * tc->mss;
}
/**
@@ -310,7 +290,7 @@ tcp_make_synack_options (tcp_connection_t * tc, tcp_options_t * opts)
u8 len = 0;
opts->flags |= TCP_OPTS_FLAG_MSS;
- opts->mss = dummy_mtu; /*XXX discover that */
+ opts->mss = tc->mss;
len += TCP_OPTION_LEN_MSS;
if (tcp_opts_wscale (&tc->opt))
@@ -389,6 +369,57 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts,
}
}
+/**
+ * Update max segment size we're able to process.
+ *
+ * The value is constrained by our interface's MTU and IP options. It is
+ * also what we advertise to our peer.
+ */
+void
+tcp_update_rcv_mss (tcp_connection_t * tc)
+{
+ /* TODO find our iface MTU */
+ tc->mss = dummy_mtu;
+}
+
+/**
+ * Update snd_mss to reflect the effective segment size that we can send
+ * by taking into account all TCP options, including SACKs
+ */
+void
+tcp_update_snd_mss (tcp_connection_t * tc)
+{
+ /* Compute options to be used for connection. These may be reused when
+ * sending data or to compute the effective mss (snd_mss) */
+ tc->snd_opts_len =
+ tcp_make_options (tc, &tc->snd_opts, TCP_STATE_ESTABLISHED);
+
+ /* XXX check if MTU has been updated */
+ tc->snd_mss = clib_min (tc->mss, tc->opt.mss) - tc->snd_opts_len;
+}
+
+void
+tcp_init_mss (tcp_connection_t * tc)
+{
+ tcp_update_rcv_mss (tc);
+
+ /* TODO cache mss and consider PMTU discovery */
+ tc->snd_mss = clib_min (tc->opt.mss, tc->mss);
+
+ if (tc->snd_mss == 0)
+ {
+ clib_warning ("snd mss is 0");
+ tc->snd_mss = tc->mss;
+ }
+
+ /* We should have enough space for 40 bytes of options */
+ ASSERT (tc->snd_mss > 45);
+
+ /* If we use timestamp option, account for it */
+ if (tcp_opts_tstamp (&tc->opt))
+ tc->snd_mss -= TCP_OPTION_LEN_TIMESTAMP;
+}
+
#define tcp_get_free_buffer_index(tm, bidx) \
do { \
u32 *my_tx_buffers, n_free_buffers; \
@@ -886,20 +917,20 @@ tcp_make_state_flags (tcp_state_t next_state)
*/
static void
tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
- tcp_state_t next_state)
+ tcp_state_t next_state, u8 compute_opts)
{
u32 advertise_wnd, data_len;
- u8 tcp_opts_len, tcp_hdr_opts_len, opts_write_len, flags;
- tcp_options_t _snd_opts, *snd_opts = &_snd_opts;
+ u8 tcp_hdr_opts_len, opts_write_len, flags;
tcp_header_t *th;
data_len = b->current_length;
vnet_buffer (b)->tcp.flags = 0;
- /* Make and write options */
- memset (snd_opts, 0, sizeof (*snd_opts));
- tcp_opts_len = tcp_make_options (tc, snd_opts, next_state);
- tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t);
+ if (compute_opts)
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
+
+ /* Write pre-computed options */
+ tcp_hdr_opts_len = tc->snd_opts_len + sizeof (tcp_header_t);
/* Get rcv window to advertise */
advertise_wnd = tcp_window_to_advertise (tc, next_state);
@@ -910,9 +941,9 @@ tcp_push_hdr_i (tcp_connection_t * tc, vlib_buffer_t * b,
tc->rcv_nxt, tcp_hdr_opts_len, flags,
advertise_wnd);
- opts_write_len = tcp_options_write ((u8 *) (th + 1), snd_opts);
+ opts_write_len = tcp_options_write ((u8 *) (th + 1), &tc->snd_opts);
- ASSERT (opts_write_len == tcp_opts_len);
+ ASSERT (opts_write_len == tc->snd_opts_len);
/* Tag the buffer with the connection index */
vnet_buffer (b)->tcp.connection_index = tc->c_c_index;
@@ -993,6 +1024,8 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b,
goto done;
}
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
+
ASSERT (max_bytes <= tc->snd_mss);
n_bytes = stream_session_peek_bytes (&tc->connection,
@@ -1000,7 +1033,7 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b,
max_bytes);
ASSERT (n_bytes != 0);
b->current_length = n_bytes;
- tcp_push_hdr_i (tc, b, tc->state);
+ tcp_push_hdr_i (tc, b, tc->state, 0);
tc->rtx_bytes += n_bytes;
done:
@@ -1097,7 +1130,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
vlib_buffer_make_headroom (b, MAX_HDRS_LEN);
- tcp_push_hdr_i (tc, b, tc->state);
+ tcp_push_hdr_i (tc, b, tc->state, 1);
/* Account for the SYN */
tc->snd_nxt += 1;
@@ -1168,6 +1201,7 @@ tcp_timer_persist_handler (u32 index)
/* Try to force the first unsent segment */
tcp_get_free_buffer_index (tm, &bi);
b = vlib_get_buffer (vm, bi);
+ tc->snd_opts_len = tcp_make_options (tc, &tc->snd_opts, tc->state);
n_bytes = stream_session_peek_bytes (&tc->connection,
vlib_buffer_get_current (b),
tc->snd_una_max - tc->snd_una,
@@ -1180,7 +1214,7 @@ tcp_timer_persist_handler (u32 index)
}
b->current_length = n_bytes;
- tcp_push_hdr_i (tc, b, tc->state);
+ tcp_push_hdr_i (tc, b, tc->state, 0);
tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4);
/* Re-enable persist timer */
@@ -1507,7 +1541,7 @@ tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b)
tcp_connection_t *tc;
tc = (tcp_connection_t *) tconn;
- tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED);
+ tcp_push_hdr_i (tc, b, TCP_STATE_ESTABLISHED, 0);
return 0;
}
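
With this patch tcp_push_hdr_i writes the options that were pre-computed when snd_mss was last refreshed (or by the retransmit/persist paths just before the call), and the new compute_opts flag forces a rebuild only where the cached options may be stale, as in the SYN retransmit case. A rough sketch of the pattern, with stand-in types and a hypothetical render_options helper in place of tcp_make_options/tcp_options_write:

#include <stdint.h>
#include <string.h>

#define MAX_OPTS_LEN 40 /* TCP options never exceed 40 bytes */

typedef struct
{
  uint8_t snd_opts[MAX_OPTS_LEN]; /* stand-in for the cached tcp_options_t */
  uint8_t snd_opts_len;
} conn_sketch_t;

/* Hypothetical helper: recompute and re-render the connection's options */
uint8_t render_options (conn_sketch_t *tc);

static void
push_hdr_sketch (conn_sketch_t *tc, uint8_t *hdr_opts, int compute_opts)
{
  /* Recompute only when the caller says the cache may be stale */
  if (compute_opts)
    tc->snd_opts_len = render_options (tc);

  /* Otherwise reuse the options computed when snd_mss was last updated,
   * so the header length stays consistent with the advertised snd_mss */
  memcpy (hdr_opts, tc->snd_opts, tc->snd_opts_len);
}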