1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
|
/*
* Copyright (c) 2016 Intel Corporation.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Some helper stream control functions definitions.
*/
#ifndef _TCP_CTL_H_
#define _TCP_CTL_H_
#include "tcp_stream.h"
#include "tcp_ofo.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Block further use of the stream: take down the reader/writer
 * use-locks for both the RX and the TX halves.
 * Counterpart of tcp_stream_up().
 */
static inline void
tcp_stream_down(struct tle_tcp_stream *s)
{
	struct tle_tcp_stream *ts = s;

	/* RX side first, then TX — keep lock ordering stable */
	rwl_down(&ts->rx.use);
	rwl_down(&ts->tx.use);
}
/*
 * Re-enable use of the stream: release the reader/writer
 * use-locks for both the RX and the TX halves.
 * Counterpart of tcp_stream_down().
 */
static inline void
tcp_stream_up(struct tle_tcp_stream *s)
{
	struct tle_tcp_stream *ts = s;

	/* same order as tcp_stream_down(): RX first, then TX */
	rwl_up(&ts->rx.use);
	rwl_up(&ts->tx.use);
}
/* calculate RCV.WND value based on size of stream receive buffer */
static inline uint32_t
calc_rx_wnd(const struct tle_tcp_stream *s, uint32_t scale)
{
return s->rx.q->prod.mask << scale;
}
/* empty stream's receive queue */
static void
empty_rq(struct tle_tcp_stream *s)
{
empty_mbuf_ring(s->rx.q);
tcp_ofo_reset(s->rx.ofo);
}
/* empty stream's listen queue */
static void
empty_lq(struct tle_tcp_stream *s, struct stbl *st)
{
uint32_t i, n;
struct rte_mbuf *mb;
union pkt_info pi;
union seg_info si;
struct stbl_entry *se[MAX_PKT_BURST];
do {
n = rte_ring_dequeue_burst(s->rx.q, (void **)se, RTE_DIM(se));
for (i = 0; i != n; i++) {
mb = stbl_get_pkt(se[i]);
get_pkt_info(mb, &pi, &si);
stbl_del_pkt_lock(st, se[i], &pi);
rte_pktmbuf_free(mb);
}
} while (n != 0);
}
/*
 * Tear down a TCP stream and return it to the context's free list.
 * Resets the TCB, releases the local port (if one was taken), drains
 * the listen/receive/transmit queues and removes the stream from the
 * RX stream table.  Caller is expected to have made the stream
 * unreachable first (e.g. via tcp_stream_down()).
 * NOTE(review): statement order below is significant — do not reorder.
 */
static inline void
tcp_stream_reset(struct tle_ctx *ctx, struct tle_tcp_stream *s)
{
	struct stbl *st;
	uint16_t uop;
	st = CTX_TCP_STLB(ctx);
	/* reset TX armed */
	rte_atomic32_set(&s->tx.arm, 0);
	/* reset TCB */
	/* remember whether the stream was a listener or an active
	 * connect before wiping the TCB — decides the cleanup path. */
	uop = s->tcb.uop & (TCP_OP_LISTEN | TCP_OP_CONNECT);
	memset(&s->tcb, 0, sizeof(s->tcb));
	/* reset cached destination */
	memset(&s->tx.dst, 0, sizeof(s->tx.dst));
	if (uop != 0) {
		/* free stream's destination port */
		stream_clear_ctx(ctx, &s->s);
		/* LISTEN and CONNECT are mutually exclusive here, so an
		 * exact compare selects the listener-only cleanup. */
		if (uop == TCP_OP_LISTEN)
			empty_lq(s, st);
	}
	if (s->ste != NULL) {
		/* remove entry from RX streams table */
		stbl_del_stream_lock(st, s->ste, s);
		s->ste = NULL;
		/* drain RX ring and out-of-order buffer only after the
		 * stream can no longer be found via the table. */
		empty_rq(s);
	}
	/* empty TX queue */
	empty_mbuf_ring(s->tx.q);
	/*
	 * mark the stream as free again.
	 * if there still are pkts queued for TX,
	 * then put this stream to the tail of free list.
	 */
	put_stream(ctx, &s->s, TCP_STREAM_TX_FINISHED(s));
}
#ifdef __cplusplus
}
#endif
#endif /* _TCP_CTL_H_ */
|