Diffstat (limited to 'lib/libtle_l4p/tcp_rxtx.c')
-rw-r--r--  lib/libtle_l4p/tcp_rxtx.c  166
1 file changed, 106 insertions(+), 60 deletions(-)
diff --git a/lib/libtle_l4p/tcp_rxtx.c b/lib/libtle_l4p/tcp_rxtx.c
index 4f43557..3d1c550 100644
--- a/lib/libtle_l4p/tcp_rxtx.c
+++ b/lib/libtle_l4p/tcp_rxtx.c
@@ -63,7 +63,7 @@ rx_obtain_listen_stream(const struct tle_dev *dev, const union pkt_info *pi,
return NULL;
/* check that we have a proper stream. */
- if (s->tcb.state != TCP_ST_LISTEN) {
+ if (s->tcb.state != TLE_TCP_ST_LISTEN) {
tcp_stream_release(s);
s = NULL;
}
@@ -80,7 +80,7 @@ rx_acquire_stream(struct tle_stream *ts)
if (tcp_stream_acquire(s) < 0)
return NULL;
- else if (s->tcb.state == TCP_ST_CLOSED) {
+ else if (s->tcb.state == TLE_TCP_ST_CLOSED) {
tcp_stream_release(s);
return NULL;
}
@@ -104,7 +104,7 @@ rx_obtain_stream(const struct tle_dev *dev, struct stbl *st,
if (tcp_stream_acquire(s) < 0)
return NULL;
/* check that we have a proper stream. */
- else if (s->tcb.state == TCP_ST_CLOSED) {
+ else if (s->tcb.state == TLE_TCP_ST_CLOSED) {
tcp_stream_release(s);
s = NULL;
}
@@ -199,6 +199,12 @@ get_ip_pid(struct tle_dev *dev, uint32_t num, uint32_t type, uint32_t st)
}
}
+static inline uint32_t
+tcp_stream_adjust_tms(const struct tle_tcp_stream *s, uint32_t tms)
+{
+ return tms - s->ts_offset;
+}
+
static inline void
fill_tcph(struct rte_tcp_hdr *l4h, const struct tcb *tcb, union l4_ports port,
uint32_t seq, uint8_t hlen, uint8_t flags)
@@ -271,19 +277,19 @@ tcp_fill_mbuf(struct rte_mbuf *m, const struct tle_tcp_stream *s,
l3h->packet_id = rte_cpu_to_be_16(pid);
l3h->total_length = rte_cpu_to_be_16(plen + dst->l3_len + l4);
- if ((ol_flags & PKT_TX_TCP_CKSUM) != 0)
+ if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) != 0)
l4h->cksum = _ipv4x_phdr_cksum(l3h, m->l3_len,
ol_flags);
else if (swcsm != 0)
l4h->cksum = _ipv4_udptcp_mbuf_cksum(m, len, l3h);
- if ((ol_flags & PKT_TX_IP_CKSUM) == 0 && swcsm != 0)
+ if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0 && swcsm != 0)
l3h->hdr_checksum = _ipv4x_cksum(l3h, m->l3_len);
} else {
struct rte_ipv6_hdr *l3h;
l3h = (struct rte_ipv6_hdr *)(l2h + dst->l2_len);
l3h->payload_len = rte_cpu_to_be_16(plen + l4);
- if ((ol_flags & PKT_TX_TCP_CKSUM) != 0)
+ if ((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) != 0)
l4h->cksum = rte_ipv6_phdr_cksum(l3h, ol_flags);
else if (swcsm != 0)
l4h->cksum = _ipv6_udptcp_mbuf_cksum(m, len, l3h);
@@ -300,7 +306,7 @@ tcp_fill_mbuf(struct rte_mbuf *m, const struct tle_tcp_stream *s,
*/
static inline void
tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
- uint32_t seq, uint32_t pid)
+ uint32_t seq, uint32_t pid, uint8_t tcp_flags)
{
struct rte_tcp_hdr *l4h;
uint32_t len;
@@ -311,6 +317,8 @@ tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
l4h->sent_seq = rte_cpu_to_be_32(seq);
l4h->recv_ack = rte_cpu_to_be_32(tcb->rcv.nxt);
+ l4h->tcp_flags |= tcp_flags;
+
if (tcb->so.ts.raw != 0)
fill_tms_opts(l4h + 1, tcb->snd.ts, tcb->rcv.ts);
@@ -320,12 +328,12 @@ tcp_update_mbuf(struct rte_mbuf *m, uint32_t type, const struct tcb *tcb,
m->l2_len);
l3h->hdr_checksum = 0;
l3h->packet_id = rte_cpu_to_be_16(pid);
- if ((m->ol_flags & PKT_TX_IP_CKSUM) == 0)
+ if ((m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) == 0)
l3h->hdr_checksum = _ipv4x_cksum(l3h, m->l3_len);
}
/* have to calculate TCP checksum in SW */
- if ((m->ol_flags & PKT_TX_TCP_CKSUM) == 0) {
+ if ((m->ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) == 0) {
l4h->cksum = 0;
@@ -388,6 +396,7 @@ tx_data_bulk(struct tle_tcp_stream *s, union seqlen *sl, struct rte_mbuf *mi[],
struct tle_dev *dev;
struct rte_mbuf *mb;
struct rte_mbuf *mo[MAX_PKT_BURST + TCP_MAX_PKT_SEG];
+ uint8_t tcp_flags;
mss = s->tcb.snd.mss;
type = s->s.type;
@@ -398,6 +407,8 @@ tx_data_bulk(struct tle_tcp_stream *s, union seqlen *sl, struct rte_mbuf *mi[],
k = 0;
tn = 0;
fail = 0;
+ tcp_flags = 0x0;
+
for (i = 0; i != num && sl->len != 0 && fail == 0; i++) {
mb = mi[i];
@@ -407,8 +418,12 @@ tx_data_bulk(struct tle_tcp_stream *s, union seqlen *sl, struct rte_mbuf *mi[],
/*fast path, no need to use indirect mbufs. */
if (plen <= sz) {
+ if (i == (num - 1)) {
+ tcp_flags |= TCP_FLAG_PSH;
+ }
+
/* update pkt TCP header */
- tcp_update_mbuf(mb, type, &s->tcb, sl->seq, pid + i);
+ tcp_update_mbuf(mb, type, &s->tcb, sl->seq, pid + i, tcp_flags);
/* keep mbuf till ACK is received. */
rte_pktmbuf_refcnt_update(mb, 1);
@@ -828,13 +843,13 @@ stream_term(struct tle_tcp_stream *s)
{
struct sdr *dr;
- s->tcb.state = TCP_ST_CLOSED;
+ s->tcb.state = TLE_TCP_ST_CLOSED;
rte_smp_wmb();
timer_stop(s);
/* close() was already invoked, schedule final cleanup */
- if ((s->tcb.uop & TCP_OP_CLOSE) != 0) {
+ if ((s->tcb.uop & TLE_TCP_OP_CLOSE) != 0) {
dr = CTX_TCP_SDR(s->s.ctx);
STAILQ_INSERT_TAIL(&dr->be, &s->s, link);
@@ -933,14 +948,14 @@ accept_prep_stream(struct tle_tcp_stream *ps, struct stbl *st,
cs->tcb.snd.ssthresh = cs->tcb.snd.wnd;
cs->tcb.snd.rto_tw = ps->tcb.snd.rto_tw;
- cs->tcb.state = TCP_ST_ESTABLISHED;
+ cs->tcb.state = TLE_TCP_ST_ESTABLISHED;
/* add stream to the table */
cs->ste = stbl_add_stream(st, pi, cs);
if (cs->ste == NULL)
return -ENOBUFS;
- cs->tcb.uop |= TCP_OP_ACCEPT;
+ cs->tcb.uop |= TLE_TCP_OP_ACCEPT;
tcp_stream_up(cs);
return 0;
}
@@ -1058,7 +1073,7 @@ static void
stream_timewait(struct tle_tcp_stream *s, uint32_t rto)
{
if (rto != 0) {
- s->tcb.state = TCP_ST_TIME_WAIT;
+ s->tcb.state = TLE_TCP_ST_TIME_WAIT;
s->tcb.snd.rto = rto;
timer_reset(s);
} else
@@ -1072,27 +1087,29 @@ rx_fin_state(struct tle_tcp_stream *s, struct resp_info *rsp)
int32_t ackfin;
s->tcb.rcv.nxt += 1;
+ s->err.rev |= TLE_TCP_REV_FIN;
ackfin = (s->tcb.snd.una == s->tcb.snd.fss);
state = s->tcb.state;
- if (state == TCP_ST_ESTABLISHED) {
- s->tcb.state = TCP_ST_CLOSE_WAIT;
+ if (state == TLE_TCP_ST_ESTABLISHED) {
+ s->tcb.state = TLE_TCP_ST_CLOSE_WAIT;
/* raise err.ev & err.cb */
if (s->err.ev != NULL)
tle_event_raise(s->err.ev);
else if (s->err.cb.func != NULL)
s->err.cb.func(s->err.cb.data, &s->s);
- } else if (state == TCP_ST_FIN_WAIT_1 || state == TCP_ST_CLOSING) {
+ } else if (state == TLE_TCP_ST_FIN_WAIT_1 ||
+ state == TLE_TCP_ST_CLOSING) {
rsp->flags |= TCP_FLAG_ACK;
if (ackfin != 0)
stream_timewait(s, s->tcb.snd.rto_tw);
else
- s->tcb.state = TCP_ST_CLOSING;
- } else if (state == TCP_ST_FIN_WAIT_2) {
+ s->tcb.state = TLE_TCP_ST_CLOSING;
+ } else if (state == TLE_TCP_ST_FIN_WAIT_2) {
rsp->flags |= TCP_FLAG_ACK;
stream_timewait(s, s->tcb.snd.rto_tw);
- } else if (state == TCP_ST_LAST_ACK && ackfin != 0) {
+ } else if (state == TLE_TCP_ST_LAST_ACK && ackfin != 0) {
stream_term(s);
}
}
@@ -1122,7 +1139,7 @@ rx_fin(struct tle_tcp_stream *s, uint32_t state,
if (ret != 0)
return ret;
- if (state < TCP_ST_ESTABLISHED)
+ if (state < TLE_TCP_ST_ESTABLISHED)
return -EINVAL;
if (plen != 0) {
@@ -1169,7 +1186,7 @@ rx_rst(struct tle_tcp_stream *s, uint32_t state, uint32_t flags,
* In the SYN-SENT state (a RST received in response to an initial SYN),
* the RST is acceptable if the ACK field acknowledges the SYN.
*/
- if (state == TCP_ST_SYN_SENT) {
+ if (state == TLE_TCP_ST_SYN_SENT) {
rc = ((flags & TCP_FLAG_ACK) == 0 ||
si->ack != s->tcb.snd.nxt) ?
-ERANGE : 0;
@@ -1178,8 +1195,10 @@ rx_rst(struct tle_tcp_stream *s, uint32_t state, uint32_t flags,
else
rc = check_seqn(&s->tcb, si->seq, 0);
- if (rc == 0)
+ if (rc == 0) {
+ s->err.rev |= TLE_TCP_REV_RST;
stream_term(s);
+ }
return rc;
}
@@ -1534,12 +1553,12 @@ rx_ackfin(struct tle_tcp_stream *s)
empty_tq(s);
state = s->tcb.state;
- if (state == TCP_ST_LAST_ACK)
+ if (state == TLE_TCP_ST_LAST_ACK)
stream_term(s);
- else if (state == TCP_ST_FIN_WAIT_1) {
+ else if (state == TLE_TCP_ST_FIN_WAIT_1) {
timer_stop(s);
- s->tcb.state = TCP_ST_FIN_WAIT_2;
- } else if (state == TCP_ST_CLOSING) {
+ s->tcb.state = TLE_TCP_ST_FIN_WAIT_2;
+ } else if (state == TLE_TCP_ST_CLOSING) {
stream_timewait(s, s->tcb.snd.rto_tw);
}
}
@@ -1581,7 +1600,7 @@ rx_synack(struct tle_tcp_stream *s, uint32_t ts, uint32_t state,
struct tle_tcp_syn_opts so;
struct rte_tcp_hdr *th;
- if (state != TCP_ST_SYN_SENT)
+ if (state != TLE_TCP_ST_SYN_SENT)
return -EINVAL;
/*
@@ -1628,7 +1647,7 @@ rx_synack(struct tle_tcp_stream *s, uint32_t ts, uint32_t state,
rsp->flags |= TCP_FLAG_ACK;
timer_stop(s);
- s->tcb.state = TCP_ST_ESTABLISHED;
+ s->tcb.state = TLE_TCP_ST_ESTABLISHED;
rte_smp_wmb();
if (s->tx.ev != NULL)
@@ -1704,7 +1723,8 @@ rx_stream(struct tle_tcp_stream *s, uint32_t ts,
i += (ret > 0);
/* normal data/ack packets */
- } else if (state >= TCP_ST_ESTABLISHED && state <= TCP_ST_LAST_ACK) {
+ } else if (state >= TLE_TCP_ST_ESTABLISHED &&
+ state <= TLE_TCP_ST_LAST_ACK) {
/* process incoming data packets. */
dack_info_init(&tack, &s->tcb);
@@ -1807,7 +1827,7 @@ rx_postsyn(struct tle_dev *dev, struct stbl *st, uint32_t type, uint32_t ts,
k = 0;
state = s->tcb.state;
- if (state == TCP_ST_LISTEN) {
+ if (state == TLE_TCP_ST_LISTEN) {
/* one connection per flow */
cs = NULL;
@@ -1997,6 +2017,8 @@ tle_tcp_stream_rx_bulk(struct tle_stream *ts, struct rte_mbuf *pkt[],
return 0;
}
+ tms = tcp_stream_adjust_tms(s, tms);
+
/* extract packet info and check the L3/L4 csums */
for (i = 0; i != num; i++) {
get_pkt_info(pkt[i], &pi[i], &si[i]);
@@ -2222,8 +2244,8 @@ tle_tcp_stream_connect(struct tle_stream *ts, const struct sockaddr *addr)
return -EINVAL;
if (tcp_stream_try_acquire(s) > 0) {
- rc = rte_atomic16_cmpset(&s->tcb.state, TCP_ST_CLOSED,
- TCP_ST_SYN_SENT);
+ rc = rte_atomic16_cmpset(&s->tcb.state, TLE_TCP_ST_CLOSED,
+ TLE_TCP_ST_SYN_SENT);
rc = (rc == 0) ? -EDEADLK : 0;
} else
rc = -EINVAL;
@@ -2234,7 +2256,7 @@ tle_tcp_stream_connect(struct tle_stream *ts, const struct sockaddr *addr)
}
/* fill stream, prepare and transmit syn pkt */
- s->tcb.uop |= TCP_OP_CONNECT;
+ s->tcb.uop |= TLE_TCP_OP_CONNECT;
rc = tx_syn(s, addr);
tcp_stream_release(s);
@@ -2252,14 +2274,25 @@ tle_tcp_stream_connect(struct tle_stream *ts, const struct sockaddr *addr)
static inline void
tcb_establish(struct tle_tcp_stream *s, const struct tle_tcp_conn_info *ci)
{
- uint32_t tms;
+ uint32_t mss, tms;
tms = tcp_get_tms(s->s.ctx->cycles_ms_shift);
+ /* set a default MSS if it is unset (0) */
+ if ((ci->so.mss == 0) && (s->s.type == TLE_V4)) {
+ mss = calc_smss(TCP4_MIN_MSS, &s->tx.dst);
+ }
+ else if ((ci->so.mss == 0) && (s->s.type == TLE_V6)) {
+ mss = calc_smss(TCP6_MIN_MSS, &s->tx.dst);
+ }
+ else {
+ mss = calc_smss(ci->so.mss, &s->tx.dst);
+ }
+
s->tcb.so = ci->so;
- fill_tcb_snd(&s->tcb, ci->seq, ci->ack, ci->so.mss,
+ fill_tcb_snd(&s->tcb, ci->ack, ci->seq, mss,
ci->wnd, ci->so.wscale, &ci->so.ts);
- fill_tcb_rcv(&s->tcb, ci->seq, ci->so.wscale, &ci->so.ts);
+ fill_tcb_rcv(&s->tcb, ci->ack, ci->so.l_wscale, &ci->so.ts);
s->tcb.rcv.wnd = calc_rx_wnd(s, s->tcb.rcv.wscale);
@@ -2267,16 +2300,19 @@ tcb_establish(struct tle_tcp_stream *s, const struct tle_tcp_conn_info *ci)
s->tcb.snd.cwnd = initial_cwnd(s->tcb.snd.mss, s->tcb.snd.cwnd);
s->tcb.snd.ssthresh = s->tcb.snd.wnd;
+ /* calculate and store real timestamp offset */
+ if (ci->so.ts.raw != 0) {
+ s->ts_offset = tms - ci->so.ts.ecr;
+ tms -= s->ts_offset;
+ }
+
estimate_stream_rto(s, tms);
}
-/*
- * !!! add flgs to distinguish - add or not stream into the table.
- */
struct tle_stream *
tle_tcp_stream_establish(struct tle_ctx *ctx,
const struct tle_tcp_stream_param *prm,
- const struct tle_tcp_conn_info *ci)
+ const struct tle_tcp_conn_info *ci, uint32_t flags)
{
int32_t rc;
struct tle_tcp_stream *s;
@@ -2295,7 +2331,7 @@ tle_tcp_stream_establish(struct tle_ctx *ctx,
}
do {
- s->tcb.uop |= TCP_OP_ESTABLISH;
+ s->tcb.uop |= TLE_TCP_OP_ESTABLISH;
/* check and use stream addresses and parameters */
rc = tcp_stream_fill_prm(s, prm);
@@ -2308,16 +2344,18 @@ tle_tcp_stream_establish(struct tle_ctx *ctx,
break;
/* add the stream to the stream table */
- st = CTX_TCP_STLB(s->s.ctx);
- s->ste = stbl_add_stream_lock(st, s);
- if (s->ste == NULL) {
- rc = -ENOBUFS;
- break;
+ if ((flags & TLE_TCP_STREAM_F_PRIVATE) == 0) {
+ st = CTX_TCP_STLB(s->s.ctx);
+ s->ste = stbl_add_stream_lock(st, s);
+ if (s->ste == NULL) {
+ rc = -ENOBUFS;
+ break;
+ }
}
/* fill TCB from user provided data */
tcb_establish(s, ci);
- s->tcb.state = TCP_ST_ESTABLISHED;
+ s->tcb.state = TLE_TCP_ST_ESTABLISHED;
tcp_stream_up(s);
} while (0);
@@ -2463,7 +2501,7 @@ tle_tcp_stream_send(struct tle_stream *ts, struct rte_mbuf *pkt[], uint16_t num)
}
state = s->tcb.state;
- if (state != TCP_ST_ESTABLISHED && state != TCP_ST_CLOSE_WAIT) {
+ if (state != TLE_TCP_ST_ESTABLISHED && state != TLE_TCP_ST_CLOSE_WAIT) {
rte_errno = ENOTCONN;
tcp_stream_release(s);
return 0;
@@ -2567,7 +2605,7 @@ tle_tcp_stream_writev(struct tle_stream *ts, struct rte_mempool *mp,
}
state = s->tcb.state;
- if (state != TCP_ST_ESTABLISHED && state != TCP_ST_CLOSE_WAIT) {
+ if (state != TLE_TCP_ST_ESTABLISHED && state != TLE_TCP_ST_CLOSE_WAIT) {
rte_errno = ENOTCONN;
tcp_stream_release(s);
return -1;
@@ -2663,8 +2701,8 @@ tx_data_fin(struct tle_tcp_stream *s, uint32_t tms, uint32_t state)
tx_nxt_data(s, tms);
/* we also have to send a FIN */
- if (state != TCP_ST_ESTABLISHED &&
- state != TCP_ST_CLOSE_WAIT &&
+ if (state != TLE_TCP_ST_ESTABLISHED &&
+ state != TLE_TCP_ST_CLOSE_WAIT &&
tcp_txq_nxt_cnt(s) == 0 &&
s->tcb.snd.fss != s->tcb.snd.nxt) {
s->tcb.snd.fss = ++s->tcb.snd.nxt;
@@ -2679,18 +2717,23 @@ tx_stream(struct tle_tcp_stream *s, uint32_t tms)
state = s->tcb.state;
- if (state == TCP_ST_SYN_SENT) {
+ if (state == TLE_TCP_ST_SYN_SENT) {
/* send the SYN, start the rto timer */
send_ack(s, tms, TCP_FLAG_SYN);
timer_start(s);
- } else if (state >= TCP_ST_ESTABLISHED && state <= TCP_ST_LAST_ACK) {
+ } else if (state >= TLE_TCP_ST_ESTABLISHED &&
+ state <= TLE_TCP_ST_LAST_ACK) {
tx_data_fin(s, tms, state);
/* start RTO timer. */
if (s->tcb.snd.nxt != s->tcb.snd.una)
timer_start(s);
+ } else if (state == TLE_TCP_ST_CLOSED) {
+ if ((s->tcb.snd.close_flags & TCP_FLAG_RST) != 0)
+ send_rst(s, s->tcb.snd.nxt);
+ stream_term(s);
}
}
@@ -2718,7 +2761,8 @@ rto_stream(struct tle_tcp_stream *s, uint32_t tms)
if (s->tcb.snd.nb_retx < s->tcb.snd.nb_retm) {
- if (state >= TCP_ST_ESTABLISHED && state <= TCP_ST_LAST_ACK) {
+ if (state >= TLE_TCP_ST_ESTABLISHED &&
+ state <= TLE_TCP_ST_LAST_ACK) {
/* update SND.CWD and SND.SSTHRESH */
rto_cwnd_update(&s->tcb);
@@ -2733,7 +2777,7 @@ rto_stream(struct tle_tcp_stream *s, uint32_t tms)
tx_data_fin(s, tms, state);
- } else if (state == TCP_ST_SYN_SENT) {
+ } else if (state == TLE_TCP_ST_SYN_SENT) {
/* resending SYN */
s->tcb.so.ts.val = tms;
@@ -2750,7 +2794,8 @@ rto_stream(struct tle_tcp_stream *s, uint32_t tms)
send_ack(s, tms, TCP_FLAG_SYN);
- } else if (state == TCP_ST_TIME_WAIT) {
+ } else if (state == TLE_TCP_ST_TIME_WAIT) {
+ s->err.rev |= TLE_TCP_REV_RTO;
stream_term(s);
}
@@ -2760,6 +2805,7 @@ rto_stream(struct tle_tcp_stream *s, uint32_t tms)
timer_restart(s);
} else {
+ s->err.rev |= TLE_TCP_REV_RTO;
send_rst(s, s->tcb.snd.nxt);
stream_term(s);
}
@@ -2787,7 +2833,7 @@ tle_tcp_process(struct tle_ctx *ctx, uint32_t num)
s = rs[i];
s->timer.handle = NULL;
if (tcp_stream_try_acquire(s) > 0)
- rto_stream(s, tms);
+ rto_stream(s, tcp_stream_adjust_tms(s, tms));
tcp_stream_release(s);
}
@@ -2801,7 +2847,7 @@ tle_tcp_process(struct tle_ctx *ctx, uint32_t num)
rte_atomic32_set(&s->tx.arm, 0);
if (tcp_stream_try_acquire(s) > 0)
- tx_stream(s, tms);
+ tx_stream(s, tcp_stream_adjust_tms(s, tms));
else
txs_enqueue(s->s.ctx, s);
tcp_stream_release(s);
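
Below is a minimal caller-side sketch of the extended tle_tcp_stream_establish() API changed in this patch, assuming the TLE_TCP_STREAM_F_PRIVATE flag and the tle_tcp_conn_info fields referenced in the hunks (seq, ack, wnd, so.mss, so.wscale, so.ts); the wrapper function and its name are hypothetical and not part of the patch itself:

#include <tle_tcp.h>

/* Establish a stream from externally learned connection state.
 * Passing TLE_TCP_STREAM_F_PRIVATE keeps the stream out of the
 * context-wide stream table (the stbl_add_stream_lock() branch is
 * skipped); passing ci->so.mss == 0 lets the library fall back to
 * a default MSS derived from TCP4_MIN_MSS/TCP6_MIN_MSS and the
 * TX destination. */
static struct tle_stream *
establish_private_stream(struct tle_ctx *ctx,
		const struct tle_tcp_stream_param *prm,
		const struct tle_tcp_conn_info *ci)
{
	return tle_tcp_stream_establish(ctx, prm, ci,
			TLE_TCP_STREAM_F_PRIVATE);
}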