author     Florin Coras <fcoras@cisco.com>    2022-01-05 08:47:11 -0800
committer  Florin Coras <florin.coras@gmail.com>    2022-01-06 20:18:53 +0000
commit     0f8b91f5d8e4bc2f9a0166440abaf00b2bda29bb (patch)
tree       ff5c4e458895bdd96a7ee20f285e9035df203e44 /src/vnet
parent     273968cf2d0343b2f4e3217f25c0752f20cf03c5 (diff)
tcp: use bytes delivered to compute cwnd
Should estimate cwnd better under loss.

Type: improvement
Signed-off-by: Florin Coras <fcoras@cisco.com>
Change-Id: Idd75d40dbab212ac0a5d533009c5540b1a58f4c4
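When rate sampling (TCP_CFG_F_RATE_SAMPLE) is off, the patch falls back to deriving delivered bytes from the newly acked bytes plus newly SACKed bytes, minus previously SACKed bytes that the cumulative ACK now covers (see the new else branch in tcp_input.c below). A minimal standalone sketch of that expression, with purely illustrative values:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* A dup ACK that advances no cumulative ACK but SACKs two new 1460-byte
   * segments; nothing previously SACKed is newly covered by the ACK. */
  uint32_t bytes_acked = 0;
  uint32_t last_sacked_bytes = 2 * 1460;
  uint32_t last_bytes_delivered = 0;

  /* Same expression as the new else branch in tcp_input.c */
  uint32_t delivered = bytes_acked + last_sacked_bytes - last_bytes_delivered;

  /* bytes_acked alone would report 0 here; delivered reports 2920 */
  printf ("delivered = %u\n", delivered);
  return 0;
}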
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/tcp/tcp_cubic.c    6
-rw-r--r--  src/vnet/tcp/tcp_input.c    6
-rw-r--r--  src/vnet/tcp/tcp_newreno.c  4
3 files changed, 9 insertions, 7 deletions
diff --git a/src/vnet/tcp/tcp_cubic.c b/src/vnet/tcp/tcp_cubic.c
index cc2ffeae9f0..3160e528e6d 100644
--- a/src/vnet/tcp/tcp_cubic.c
+++ b/src/vnet/tcp/tcp_cubic.c
@@ -158,7 +158,7 @@ cubic_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
if (tcp_in_slowstart (tc))
{
- tc->cwnd += tc->bytes_acked;
+ tc->cwnd += rs->delivered;
return;
}
@@ -169,7 +169,7 @@ cubic_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
w_aimd = (u64) W_est (cd, t, rtt_sec) * tc->snd_mss;
if (w_cubic < w_aimd)
{
- cubic_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
+ cubic_cwnd_accumulate (tc, tc->cwnd, rs->delivered);
}
else
{
@@ -195,7 +195,7 @@ cubic_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
/* Practically we can't increment so just inflate threshold */
thresh = 50 * tc->cwnd;
}
- cubic_cwnd_accumulate (tc, thresh, tc->bytes_acked);
+ cubic_cwnd_accumulate (tc, thresh, rs->delivered);
}
}
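The cubic hunks above feed rs->delivered into cubic_cwnd_accumulate instead of tc->bytes_acked; delivered also counts data covered only by SACK blocks, which bytes_acked alone misses under loss. As a rough intuition, and assuming the accumulator follows the common byte-counting pattern of adding one MSS per thresh bytes reported delivered (demo_cwnd_accumulate below is a hypothetical stand-in, not VPP's helper):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the accumulator: carry delivered bytes in acc
 * and add one MSS to cwnd for every thresh bytes accumulated. */
static void
demo_cwnd_accumulate (uint32_t *cwnd, uint32_t *acc, uint32_t thresh,
                      uint32_t snd_mss, uint32_t delivered)
{
  *acc += delivered;
  while (*acc >= thresh)
    {
      *acc -= thresh;
      *cwnd += snd_mss;
    }
}

int
main (void)
{
  uint32_t cwnd = 10 * 1460, acc = 0, mss = 1460;
  /* Ten ACKs, each delivering one MSS: after a full window's worth of
   * delivered bytes, cwnd grows by exactly one MSS. */
  for (int i = 0; i < 10; i++)
    demo_cwnd_accumulate (&cwnd, &acc, cwnd, mss, mss);
  printf ("cwnd = %u acc = %u\n", cwnd, acc);  /* cwnd = 16060 acc = 0 */
  return 0;
}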
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 30d57a3bb3e..5f165c01b4c 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -866,8 +866,7 @@ tcp_cc_handle_event (tcp_connection_t * tc, tcp_rate_sample_t * rs,
tcp_fastrecovery_first_on (tc);
tc->rxt_delivered += tc->sack_sb.rxt_sacked;
- tc->prr_delivered += tc->bytes_acked + tc->sack_sb.last_sacked_bytes
- - tc->sack_sb.last_bytes_delivered;
+ tc->prr_delivered += rs->delivered;
}
else
{
@@ -1042,6 +1041,9 @@ process_ack:
if (tc->cfg_flags & TCP_CFG_F_RATE_SAMPLE)
tcp_bt_sample_delivery_rate (tc, &rs);
+ else
+ rs.delivered = tc->bytes_acked + tc->sack_sb.last_sacked_bytes -
+ tc->sack_sb.last_bytes_delivered;
if (tc->bytes_acked + tc->sack_sb.last_sacked_bytes)
{
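In the first tcp_input.c hunk, prr_delivered now also grows by rs->delivered during fast recovery. prr_delivered drives proportional rate reduction; as a reference point, RFC 6937 computes the proportional send budget as CEIL(prr_delivered * ssthresh / RecoverFS) - prr_out. A small sketch of that formula (RFC variable names, not VPP's):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the proportional part of RFC 6937 PRR; names follow the RFC,
 * not VPP's data structures. */
static uint32_t
prr_sndcnt (uint64_t prr_delivered, uint32_t ssthresh, uint32_t recover_fs,
            uint32_t prr_out)
{
  /* CEIL (prr_delivered * ssthresh / recover_fs) - prr_out */
  uint64_t budget = (prr_delivered * ssthresh + recover_fs - 1) / recover_fs;
  return budget > prr_out ? (uint32_t) (budget - prr_out) : 0;
}

int
main (void)
{
  /* ssthresh halved relative to the flight size at loss: the sender may
   * transmit about half of what was just reported delivered. */
  printf ("sndcnt = %u\n", prr_sndcnt (2920, 7300, 14600, 0));  /* 1460 */
  return 0;
}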
diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c
index c5ffc2a4109..39035bac00c 100644
--- a/src/vnet/tcp/tcp_newreno.c
+++ b/src/vnet/tcp/tcp_newreno.c
@@ -49,12 +49,12 @@ newreno_rcv_ack (tcp_connection_t * tc, tcp_rate_sample_t * rs)
{
if (tcp_in_slowstart (tc))
{
- tc->cwnd += clib_min (tc->snd_mss, tc->bytes_acked);
+ tc->cwnd += clib_min (tc->snd_mss, rs->delivered);
}
else
{
/* tc->cwnd += clib_max ((tc->snd_mss * tc->snd_mss) / tc->cwnd, 1); */
- tcp_cwnd_accumulate (tc, tc->cwnd, tc->bytes_acked);
+ tcp_cwnd_accumulate (tc, tc->cwnd, rs->delivered);
}
}
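In slow start, NewReno now grows cwnd by min(snd_mss, rs->delivered) per ACK, which matches the RFC 5681 rule cwnd += min(N, SMSS) with N taken as delivered bytes. A toy example with illustrative values:

#include <stdint.h>
#include <stdio.h>

static uint32_t
min_u32 (uint32_t a, uint32_t b)
{
  return a < b ? a : b;
}

int
main (void)
{
  uint32_t snd_mss = 1460;
  uint32_t cwnd = 2 * 1460;
  /* A stretch ACK delivering two full segments still grows cwnd by one MSS */
  uint32_t delivered = 2 * 1460;
  cwnd += min_u32 (snd_mss, delivered);
  printf ("cwnd = %u\n", cwnd);  /* 4380 */
  return 0;
}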