author     Florin Coras <fcoras@cisco.com>          2018-09-25 14:00:34 -0700
committer  Damjan Marion <dmarion@me.com>           2018-11-09 11:24:22 +0000
commit     2e31cc35ca5db7f16c8052578d79f1ec84c0acb5 (patch)
tree       673f9bc946628d9d554c126f92d75eab4d67d0cb /src
parent     97670eb3c643eefbecfbe2d61a8f06cde9516778 (diff)
tcp: basic cubic implementation

Because the code is not optimized, newreno is still the default
congestion control algorithm.

Change-Id: I7061cc80c5a75fa8e8265901fae4ea2888e35173
Signed-off-by: Florin Coras <fcoras@cisco.com>
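The new algorithm is opt-in: tcp_init() leaves tcp_main.cc_algo at TCP_CC_NEWRENO, and the cc-algo keyword parsed by tcp_config_fn() selects cubic at startup. A minimal startup.conf fragment, assuming the usual tcp { ... } configuration stanza (the stanza registration itself is not part of this diff), would look like:

    tcp {
      cc-algo cubic
    }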
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/CMakeLists.txt    |   1
-rw-r--r--  src/vnet/tcp/tcp.c         |  21
-rw-r--r--  src/vnet/tcp/tcp.h         |  34
-rw-r--r--  src/vnet/tcp/tcp_cubic.c   | 194
-rw-r--r--  src/vnet/tcp/tcp_input.c   |  11
-rw-r--r--  src/vnet/tcp/tcp_newreno.c |  10
-rw-r--r--  src/vnet/tcp/tcp_output.c  |   2
7 files changed, 259 insertions, 14 deletions
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index 708e56d6370..d7aa4a5a951 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -537,6 +537,7 @@ list(APPEND VNET_SOURCES
tcp/tcp_output.c
tcp/tcp_input.c
tcp/tcp_newreno.c
+ tcp/tcp_cubic.c
tcp/tcp.c
)
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index d759cf0d0cd..695f614a91c 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -510,7 +510,7 @@ tcp_connection_fib_attach (tcp_connection_t * tc)
static void
tcp_cc_init (tcp_connection_t * tc)
{
- tc->cc_algo = tcp_cc_algo_get (TCP_CC_NEWRENO);
+ tc->cc_algo = tcp_cc_algo_get (tcp_main.cc_algo);
tc->cc_algo->init (tc);
}
@@ -1425,11 +1425,27 @@ tcp_init (vlib_main_t * vm)
tcp_api_reference ();
tm->tx_pacing = 1;
+ tm->cc_algo = TCP_CC_NEWRENO;
return 0;
}
VLIB_INIT_FUNCTION (tcp_init);
+uword
+unformat_tcp_cc_algo (unformat_input_t * input, va_list * va)
+{
+ uword *result = va_arg (*va, uword *);
+
+ if (unformat (input, "newreno"))
+ *result = TCP_CC_NEWRENO;
+ else if (unformat (input, "cubic"))
+ *result = TCP_CC_CUBIC;
+ else
+ return 0;
+
+ return 1;
+}
+
static clib_error_t *
tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
{
@@ -1451,6 +1467,9 @@ tcp_config_fn (vlib_main_t * vm, unformat_input_t * input)
;
else if (unformat (input, "no-tx-pacing"))
tm->tx_pacing = 0;
+ else if (unformat (input, "cc-algo %U", unformat_tcp_cc_algo,
+ &tm->cc_algo))
+ ;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h
index 843b90d987e..46262978a83 100644
--- a/src/vnet/tcp/tcp.h
+++ b/src/vnet/tcp/tcp.h
@@ -30,6 +30,7 @@
#define TCP_PAWS_IDLE 24 * 24 * 60 * 60 * THZ /**< 24 days */
#define TCP_FIB_RECHECK_PERIOD 1 * THZ /**< Recheck every 1s */
#define TCP_MAX_OPTION_SPACE 40
+#define TCP_CC_DATA_SZ 20
#define TCP_DUPACK_THRESHOLD 3
#define TCP_MAX_RX_FIFO_SIZE 32 << 20
@@ -249,6 +250,7 @@ u8 *format_tcp_scoreboard (u8 * s, va_list * args);
typedef enum _tcp_cc_algorithm_type
{
TCP_CC_NEWRENO,
+ TCP_CC_CUBIC,
} tcp_cc_algorithm_type_e;
typedef struct _tcp_cc_algorithm tcp_cc_algorithm_t;
@@ -262,6 +264,7 @@ typedef enum _tcp_cc_ack_t
typedef struct _tcp_connection
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
transport_connection_t connection; /**< Common transport data. First! */
u8 state; /**< TCP state as per tcp_state_t */
@@ -315,6 +318,7 @@ typedef struct _tcp_connection
u32 tsecr_last_ack; /**< Timestamp echoed to us in last healthy ACK */
u32 snd_congestion; /**< snd_una_max when congestion is detected */
tcp_cc_algorithm_t *cc_algo; /**< Congestion control algorithm */
+ u8 cc_data[TCP_CC_DATA_SZ]; /**< Congestion control algo private data */
/* RTT and RTO */
u32 rto; /**< Retransmission timeout */
@@ -329,6 +333,7 @@ typedef struct _tcp_connection
u32 limited_transmit; /**< snd_nxt when limited transmit starts */
u32 last_fib_check; /**< Last time we checked fib route for peer */
u32 sw_if_index; /**< Interface for the connection */
+ u32 tx_fifo_size; /**< Tx fifo size. Used to constrain cwnd */
} tcp_connection_t;
struct _tcp_cc_algorithm
@@ -460,6 +465,8 @@ typedef struct _tcp_main
/** fault-injection */
f64 buffer_fail_fraction;
+
+ u8 cc_algo;
} tcp_main_t;
extern tcp_main_t tcp_main;
@@ -648,6 +655,25 @@ tcp_initial_cwnd (const tcp_connection_t * tc)
return 4 * tc->snd_mss;
}
+/*
+ * Accumulate acked bytes for cwnd increase
+ *
+ * Once threshold bytes are accumulated, snd_mss bytes are added
+ * to the cwnd.
+ */
+always_inline void
+tcp_cwnd_accumulate (tcp_connection_t * tc, u32 thresh, u32 bytes)
+{
+ tc->cwnd_acc_bytes += bytes;
+ if (tc->cwnd_acc_bytes >= thresh)
+ {
+ u32 inc = tc->cwnd_acc_bytes / thresh;
+ tc->cwnd_acc_bytes -= inc * thresh;
+ tc->cwnd += inc * tc->snd_mss;
+ tc->cwnd = clib_min (tc->cwnd, tc->tx_fifo_size);
+ }
+}
+
always_inline u32
tcp_loss_wnd (const tcp_connection_t * tc)
{
@@ -870,6 +896,14 @@ void tcp_cc_algo_register (tcp_cc_algorithm_type_e type,
tcp_cc_algorithm_t *tcp_cc_algo_get (tcp_cc_algorithm_type_e type);
+static inline void *
+tcp_cc_data (tcp_connection_t * tc)
+{
+ return (void *) tc->cc_data;
+}
+
+void newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type);
+
/**
* Push TCP header to buffer
*
diff --git a/src/vnet/tcp/tcp_cubic.c b/src/vnet/tcp/tcp_cubic.c
new file mode 100644
index 00000000000..b9a1c3da06a
--- /dev/null
+++ b/src/vnet/tcp/tcp_cubic.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/tcp/tcp.h>
+#include <math.h>
+
+#define beta_cubic 0.7
+#define cubic_c 0.4
+#define west_const (3 * (1 - beta_cubic) / (1 + beta_cubic))
+
+typedef struct cubic_data_
+{
+ /** time period (in seconds) needed to increase the current window
+ * size to W_max if there are no further congestion events */
+ f64 K;
+
+ /** time (in sec) since the start of current congestion avoidance */
+ f64 t_start;
+
+ /** Inflection point of the cubic function */
+ u32 w_max;
+
+} __clib_packed cubic_data_t;
+
+STATIC_ASSERT (sizeof (cubic_data_t) <= TCP_CC_DATA_SZ, "cubic data len");
+
+static inline f64
+cubic_time (u32 thread_index)
+{
+ return transport_time_now (thread_index);
+}
+
+/**
+ * RFC 8312 Eq. 1
+ *
+ * CUBIC window increase function. Time and K need to be provided in seconds.
+ */
+static inline u64
+W_cubic (cubic_data_t * cd, f64 t)
+{
+ f64 diff = t - cd->K;
+
+ /* W_cubic(t) = C*(t-K)^3 + W_max */
+ return cubic_c * diff * diff * diff + cd->w_max;
+}
+
+/**
+ * RFC 8312 Eq. 2
+ */
+static inline f64
+K_cubic (cubic_data_t * cd)
+{
+ /* K = cubic_root(W_max*(1-beta_cubic)/C) */
+ return pow (cd->w_max * (1 - beta_cubic) / cubic_c, 1 / 3.0);
+}
+
+/**
+ * RFC 8312 Eq. 4
+ *
+ * Estimates the window size of AIMD(alpha_aimd, beta_aimd) for
+ * alpha_aimd=3*(1-beta_cubic)/(1+beta_cubic) and beta_aimd=beta_cubic.
+ * Time (t) and rtt should be provided in seconds
+ */
+static inline u32
+W_est (cubic_data_t * cd, f64 t, f64 rtt)
+{
+ /* W_est(t) = W_max*beta_cubic+[3*(1-beta_cubic)/(1+beta_cubic)]*(t/RTT) */
+ return cd->w_max * beta_cubic + west_const * (t / rtt);
+}
+
+static void
+cubic_congestion (tcp_connection_t * tc)
+{
+ cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+
+ cd->w_max = tc->cwnd / tc->snd_mss;
+ tc->ssthresh = clib_max (tc->cwnd * beta_cubic, 2 * tc->snd_mss);
+}
+
+static void
+cubic_recovered (tcp_connection_t * tc)
+{
+ cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+ cd->t_start = cubic_time (tc->c_thread_index);
+ cd->K = K_cubic (cd);
+ tc->cwnd = tc->ssthresh;
+}
+
+static void
+cubic_rcv_ack (tcp_connection_t * tc)
+{
+ cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+ u64 w_cubic, w_aimd;
+ f64 t, rtt_sec;
+ u32 thresh;
+
+ /* Constrained by tx fifo, can't grow further */
+ if (tc->cwnd >= tc->tx_fifo_size)
+ return;
+
+ if (tcp_in_slowstart (tc))
+ {
+ tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
+ return;
+ }
+
+ t = cubic_time (tc->c_thread_index) - cd->t_start;
+ rtt_sec = clib_min (tc->mrtt_us, (f64) tc->srtt * TCP_TICK);
+
+ w_cubic = W_cubic (cd, t + rtt_sec) * tc->snd_mss;
+ w_aimd = W_est (cd, t, rtt_sec) * tc->snd_mss;
+ if (w_cubic < w_aimd)
+ {
+ tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
+ }
+ else
+ {
+ if (w_cubic > tc->cwnd)
+ {
+ /* For NewReno and slow start, we increment cwnd based on the
+ * number of bytes acked, not the number of acks received. In
+ * particular, for NewReno we increment the cwnd by 1 snd_mss
+ * only after we accumulate 1 cwnd of acked bytes (RFC 3465).
+ *
+ * For Cubic, as per RFC 8312 we should increment cwnd by
+ * (w_cubic - cwnd)/cwnd for each ack. Instead of using that,
+ * we compute the number of packets that need to be acked
+ * before adding snd_mss to cwnd and compute the threshold
+ */
+ thresh = (tc->snd_mss * tc->cwnd) / (w_cubic - tc->cwnd);
+
+ /* Make sure we don't increase cwnd more often than every
+ * 2 segments */
+ thresh = clib_max (thresh, 2 * tc->snd_mss);
+ }
+ else
+ {
+ /* Practically we can't increment so just inflate threshold */
+ thresh = 1000 * tc->cwnd;
+ }
+ tcp_cwnd_accumulate (tc, thresh, tc->bytes_acked);
+ }
+}
+
+static void
+cubic_conn_init (tcp_connection_t * tc)
+{
+ cubic_data_t *cd = (cubic_data_t *) tcp_cc_data (tc);
+ tc->ssthresh = tc->snd_wnd;
+ tc->cwnd = tcp_initial_cwnd (tc);
+ cd->w_max = 0;
+ cd->K = 0;
+ cd->t_start = cubic_time (tc->c_thread_index);
+}
+
+const static tcp_cc_algorithm_t tcp_cubic = {
+ .congestion = cubic_congestion,
+ .recovered = cubic_recovered,
+ .rcv_ack = cubic_rcv_ack,
+ .rcv_cong_ack = newreno_rcv_cong_ack,
+ .init = cubic_conn_init
+};
+
+clib_error_t *
+cubic_init (vlib_main_t * vm)
+{
+ clib_error_t *error = 0;
+
+ tcp_cc_algo_register (TCP_CC_CUBIC, &tcp_cubic);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (cubic_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
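The three helpers in this file transcribe RFC 8312 directly: K_cubic() is Eq. 2, W_cubic() is Eq. 1 and W_est() is Eq. 4. A small standalone sketch, using purely illustrative numbers (w_max in segments, a 100 ms RTT) rather than anything from a live connection, shows how the two window estimates evolve around the inflection point K:

    #include <math.h>
    #include <stdio.h>

    #define BETA_CUBIC 0.7
    #define CUBIC_C    0.4
    #define WEST_CONST (3 * (1 - BETA_CUBIC) / (1 + BETA_CUBIC))

    int
    main (void)
    {
      double w_max = 100.0;  /* window (segments) at the last congestion event */
      double rtt = 0.1;      /* 100 ms RTT, illustrative */

      /* RFC 8312 Eq. 2: K = cubic_root (W_max * (1 - beta_cubic) / C) */
      double K = pow (w_max * (1 - BETA_CUBIC) / CUBIC_C, 1.0 / 3.0);

      for (double t = 0; t <= 2 * K; t += K / 2)
        {
          /* Eq. 1: W_cubic(t) = C * (t - K)^3 + W_max */
          double d = t - K;
          double w_cubic = CUBIC_C * d * d * d + w_max;

          /* Eq. 4: W_est(t) = W_max * beta_cubic
           *        + [3 * (1 - beta_cubic) / (1 + beta_cubic)] * (t / RTT) */
          double w_est = w_max * BETA_CUBIC + WEST_CONST * (t / rtt);

          printf ("t=%4.2fs  W_cubic=%6.1f  W_est=%6.1f\n", t, w_cubic, w_est);
        }

      /* Sanity checks from the RFC: W_cubic(0) == beta_cubic * W_max,
       * and W_cubic(K) == W_max. */
      return 0;
    }

In cubic_rcv_ack() the analogous comparison (w_cubic < w_aimd) decides whether the connection is still in the TCP-friendly region, where growth falls back to the NewReno-style one-MSS-per-cwnd accumulation.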
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 0f1ab1ab3b0..2578b7d1051 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -1441,7 +1441,7 @@ partial_ack:
*/
/* XXX limit this only to first partial ack? */
- tcp_retransmit_timer_force_update (tc);
+ tcp_retransmit_timer_update (tc);
/* RFC6675: If the incoming ACK is a cumulative acknowledgment,
* reset dupacks to 0. Also needed if in congestion recovery */
@@ -2459,6 +2459,8 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
goto drop;
}
+ new_tc0->tx_fifo_size =
+ transport_tx_fifo_size (&new_tc0->connection);
/* Update rtt with the syn-ack sample */
tcp_estimate_initial_rtt (new_tc0);
TCP_EVT_DBG (TCP_EVT_SYNACK_RCVD, new_tc0);
@@ -2478,8 +2480,10 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
goto drop;
}
- tc0->rtt_ts = 0;
- tcp_init_snd_vars (tc0);
+ new_tc0->tx_fifo_size =
+ transport_tx_fifo_size (&new_tc0->connection);
+ new_tc0->rtt_ts = 0;
+ tcp_init_snd_vars (new_tc0);
tcp_send_synack (new_tc0);
error0 = TCP_ERROR_SYNS_RCVD;
goto drop;
@@ -3090,6 +3094,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
goto drop;
}
+ child0->tx_fifo_size = transport_tx_fifo_size (&child0->connection);
tcp_send_synack (child0);
tcp_timer_set (child0, TCP_TIMER_ESTABLISH, TCP_SYN_RCVD_TIME);
diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c
index a9ec58c262f..420e47a8ca4 100644
--- a/src/vnet/tcp/tcp_newreno.c
+++ b/src/vnet/tcp/tcp_newreno.c
@@ -37,15 +37,7 @@ newreno_rcv_ack (tcp_connection_t * tc)
else
{
/* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
- tc->cwnd_acc_bytes += tc->bytes_acked;
- if (tc->cwnd_acc_bytes >= tc->cwnd)
- {
- u32 inc = tc->cwnd_acc_bytes / tc->cwnd;
- tc->cwnd_acc_bytes -= inc * tc->cwnd;
- tc->cwnd += inc * tc->snd_mss;
- }
- tc->cwnd = clib_min (tc->cwnd,
- transport_tx_fifo_size (&tc->connection));
+ tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
}
}
diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c
index 192e820e648..e16095b635d 100644
--- a/src/vnet/tcp/tcp_output.c
+++ b/src/vnet/tcp/tcp_output.c
@@ -1708,7 +1708,7 @@ tcp_timer_persist_handler (u32 index)
/* Problem already solved or worse */
if (tc->state == TCP_STATE_CLOSED || tc->state > TCP_STATE_ESTABLISHED
- || tc->snd_wnd > tc->snd_mss || tcp_in_recovery (tc))
+ || tc->snd_wnd > tc->snd_mss)
return;
available_bytes = session_tx_fifo_max_dequeue (&tc->connection);